diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 000000000000..7179616d0138 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,8 @@ +[codespell] +skip = .git,*.pdf,*.svg,versioneer.py,package-lock.json,_vendor,*.css,.codespellrc +# from https://github.com/PrefectHQ/prefect/pull/10813#issuecomment-1732676130 +ignore-regex = .*lazy=\"selectin\"|.*e import Bloc$|America/Nome + +ignore-words-list = selectin,aci,wqs,aks,ines,dependant,fsspec,automations,nmme + +check-hidden = true \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0b1b7688f1fc..bd2c83355cfb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,10 +2,10 @@ * @PrefectHQ/open-source # backend -/src/prefect/server @PrefectHQ/open-source @zangell44 +/src/prefect/server @PrefectHQ/open-source @zangell44 # ui -/ui @pleek91 +/ui @PrefectHQ/frontend # documentation /docs @PrefectHQ/docs @@ -13,7 +13,15 @@ mkdocs.yml @PrefectHQ/docs mkdocs.insiders.yml @PrefectHQ/docs # orchestration rules / policies -/src/prefect/server/orchestration @PrefectHQ/open-source +/src/prefect/server/orchestration @PrefectHQ/open-source # database configuration / models /src/prefect/server/database @PrefectHQ/open-source + +# the events subsystem, while it's being ported +/src/prefect/events @chrisguidry +/src/prefect/server/events @chrisguidry +/tests/events @chrisguidry + +# integrations +/src/integrations @desertaxle @zzstoatzz diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 000000000000..83fed516accc --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing + +Thanks for considering contributing to Prefect! + +To navigate our codebase with confidence, see our [contribution guidelines](https://docs.prefect.io/latest/contributing/overview/). diff --git a/.github/ISSUE_TEMPLATE/1_general_bug_report.yaml b/.github/ISSUE_TEMPLATE/1_general_bug_report.yaml index 606b245e77c6..108e96f59502 100644 --- a/.github/ISSUE_TEMPLATE/1_general_bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/1_general_bug_report.yaml @@ -1,6 +1,6 @@ name: 🐞 Report a bug description: Errors and regression reports with complete reproducing test cases and/or stack traces. -labels: ["status:triage", "bug"] +labels: ["needs:triage", "bug"] body: - type: markdown attributes: @@ -10,7 +10,11 @@ body: or [Discourse](https://discourse.prefect.io/) and ask there first. You are likely to get a response faster and learn more about the feature you're working with. If the issue is determined to be a bug, we will open an issue here. - + + GitHub issues raised against this repository will receive community support. If you have an + [active support agreement](https://www.prefect.io/pricing/), we recommend creating a case to ensure + a faster response. + - type: markdown attributes: value: > @@ -75,8 +79,8 @@ body: description: > Provide information about your Prefect version and environment. The easiest way to retrieve all of the information we require is the `prefect version` command. If using Prefect 1.x, it is useful to also include the output of `prefect diagnostics`. - Please do not just write "2.0". The command provides additional context such as your operating system, Prefect API type, Python version, etc. that we need to diagnose your problem. - placeholder: "# Copy output of the `prefect version` command here" + **Please do not just include your Prefect version number**. The command provides additional context such as your operating system, Prefect API type, Python version, etc. 
that we need to diagnose your problem. + placeholder: "# Copy output of the `prefect version` command here. Do not just include your Prefect version number." render: Text validations: required: true diff --git a/.github/ISSUE_TEMPLATE/2_ui_bug_report.yaml b/.github/ISSUE_TEMPLATE/2_ui_bug_report.yaml index 5b822209a63a..b41721ad8a11 100644 --- a/.github/ISSUE_TEMPLATE/2_ui_bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/2_ui_bug_report.yaml @@ -1,6 +1,6 @@ name: 🖼️ Report a bug with the Prefect UI description: Errors and display issues with the Prefect web interface. -labels: ["status:triage", "ui", "bug"] +labels: ["needs:triage", "ui", "bug"] body: - type: markdown attributes: @@ -21,6 +21,10 @@ body: 4. Additional details that may help us reproduce your issue. + GitHub issues raised against this repository will receive community support. If you have an + [active support agreement](https://www.prefect.io/pricing/), we recommend creating a case to ensure + a faster response. + - type: checkboxes id: checks attributes: @@ -62,7 +66,7 @@ body: - type: checkboxes attributes: - label: Browers + label: Browsers description: Which browsers was this bug reproduced on? Please check if your issue is specific to your browser by testing on another browser. options: - label: Chrome diff --git a/.github/ISSUE_TEMPLATE/3_feature_enhancement.yaml b/.github/ISSUE_TEMPLATE/3_feature_enhancement.yaml index 155a3569dd72..81bb5c687d4e 100644 --- a/.github/ISSUE_TEMPLATE/3_feature_enhancement.yaml +++ b/.github/ISSUE_TEMPLATE/3_feature_enhancement.yaml @@ -1,6 +1,6 @@ name: 🚀 Propose a feature enhancement description: Propose a new feature or an enhancement to an existing feature. -labels: ["status:triage", "enhancement"] +labels: ["needs:triage", "enhancement"] body: - type: checkboxes id: checks diff --git a/.github/ISSUE_TEMPLATE/4_docs_change.yaml b/.github/ISSUE_TEMPLATE/4_docs_change.yaml index 03fbb28bde4c..30650bf130ff 100644 --- a/.github/ISSUE_TEMPLATE/4_docs_change.yaml +++ b/.github/ISSUE_TEMPLATE/4_docs_change.yaml @@ -1,6 +1,6 @@ name: 📝 Suggest a change to documentation description: Propose edits, enhancements, or fixes to Prefect documentation -labels: ["docs", "status:triage"] +labels: ["docs", "needs:triage"] body: - type: checkboxes id: checks diff --git a/.github/ISSUE_TEMPLATE/5_maintenance_ticket.yaml b/.github/ISSUE_TEMPLATE/5_maintenance_ticket.yaml index dba85b54d530..df4507a5ded1 100644 --- a/.github/ISSUE_TEMPLATE/5_maintenance_ticket.yaml +++ b/.github/ISSUE_TEMPLATE/5_maintenance_ticket.yaml @@ -1,6 +1,6 @@ -name: 🛠️ Track a maintenence task +name: 🛠️ Track a maintenance task description: These are for changes that are not user or product related for maintenance of this repository. -labels: ["status:backlog", "maintenance"] +labels: ["maintenance"] body: - type: checkboxes id: checks diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index e0719e12003e..c0cadef161f1 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -10,5 +10,8 @@ contact_links: url: https://www.prefect.io/slack about: Join thousands of Prefect experts, engineers, and users in our Slack community - name: 🦆 Book a rubber duck - url: https://calendly.com/prefect-experts/rubber-duck - about: Book time with a Prefect engineer! + url: https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs + about: Schedule a meeting with a Prefect Product Advocate! 
+ - name: 🔒 Report a security issue + url: https://www.prefect.io/security/bug-bounty-program/ + about: Responsibly disclose a potential security issue. diff --git a/.github/codeql-config.yml b/.github/codeql-config.yml index 8d27bfc50f5e..b60c9a89b004 100644 --- a/.github/codeql-config.yml +++ b/.github/codeql-config.yml @@ -1,23 +1,7 @@ query-filters: - - exclude: - # this creates many false positives with our code - id: py/unsafe-cyclic-import - - exclude: - id: py/cyclic-import - - exclude: - id: py/unreachable-statement - exclude: # catching base exceptions is required id: py/catch-base-exception - - exclude: - # too many false positives with prefect.runtime - id: py/undefined-export - - exclude: - # sometimes necessary for __init__ files - id: py/import-and-import-from - - exclude: - # we dont need CodeQL quality linting - id: py/mixed-returns paths-ignore: - tests/**/test_*.py diff --git a/.github/labeler.yml b/.github/labeler.yml index 0f155f83647a..7f39e8af16e6 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,2 +1,3 @@ migration: - - src/**/migrations/**/*.py + - changed-files: + - any-glob-to-any-file: src/**/migrations/**/*.py diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 9bdf84a0ad8b..43542a6c1631 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -5,6 +5,7 @@ Thanks for opening a pull request to Prefect! We've got a few requests to help u - Provide a short overview of the change and the value it adds. - Share an example to help us understand the change in user experience. - Confirm that you've done common tasks so we can give a timely review. +- Review our contribution guidelines: https://docs.prefect.io/latest/contributing/overview/ Happy engineering! --> @@ -23,7 +24,16 @@ If changing documentation, a link to a preview of the page is great. - [ ] This pull request references any related issue by including "closes ``" - - If no issue exists and your change is not a small fix, please [create an issue](https://github.com/PrefectHQ/prefect/issues/new/choose) first. -- [ ] This pull request includes tests or only affects documentation. -- [ ] This pull request includes a label categorizing the change e.g. `fix`, `feature`, `enhancement`, `docs`. + - If no issue exists and your change is not a small fix, please [create an issue](https://github.com/PrefectHQ/prefect/issues/new/choose) first. +- [ ] If this pull request adds new functionality, it includes unit tests that cover the changes +- [ ] This pull request includes a label categorizing the change e.g. `maintenance`, `fix`, `feature`, `enhancement`, `docs`. + +For documentation changes: + +- [ ] This pull request includes redirect settings in `mint.json` for files that are removed or renamed. + +For new functions or classes in the Python SDK: + +- [ ] This pull request includes helpful docstrings. +- [ ] If a new Python file was added, this pull request contains a stub page in the Python SDK docs and an entry in `docs/mint.json` navigation. 
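The PR template above asks contributors to add redirect settings in `mint.json` when docs files are removed or renamed. For reference, a redirect entry in a Mintlify `docs/mint.json` typically takes the following shape; the paths here are illustrative and not taken from this diff:

```json
{
  "redirects": [
    {
      "source": "/concepts/old-page",
      "destination": "/concepts/new-page"
    }
  ]
}
```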
diff --git a/.github/workflows/api-compatibility-tests.yaml b/.github/workflows/api-compatibility-tests.yaml new file mode 100644 index 000000000000..6276a0f45ba3 --- /dev/null +++ b/.github/workflows/api-compatibility-tests.yaml @@ -0,0 +1,74 @@ +--- +name: Cloud API Compatibility +on: + pull_request: + paths: + - .github/workflows/api-compatibility-tests.yaml + - "**/*.py" + - requirements*.txt + - setup.cfg + - compat-tests + push: + branches: + - main + +# Limit concurrency by workflow/branch combination. +# +# For pull request builds, pushing additional changes to the +# branch will cancel prior in-progress and pending builds. +# +# For builds triggered on a branch push, additional changes +# will wait for prior builds to complete before starting. +# +# https://docs.github.com/en/actions/using-jobs/using-concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + compatibility-tests: + + timeout-minutes: 10 + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + with: + # Versioneer only generates correct versions with a full fetch + fetch-depth: 0 + persist-credentials: false + submodules: true + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + id: setup_python + with: + python-version: 3.12 + + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }} + + - name: Install packages + run: | + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] 'pydantic>=2.4,<3' + + - name: Create Cloud OpenAPI JSON + working-directory: compat-tests + run: curl https://api.prefect.cloud/api/openapi.json > cloud_schema.json + + - name: Create OSS OpenAPI JSON + working-directory: compat-tests + run: python ../scripts/generate_oss_openapi_schema.py + + - name: Run API compatibility tests + working-directory: compat-tests + run: pytest -vv diff --git a/.github/workflows/benchmarks.yaml b/.github/workflows/benchmarks.yaml index de9beb022a1b..d0fd5080c6f0 100644 --- a/.github/workflows/benchmarks.yaml +++ b/.github/workflows/benchmarks.yaml @@ -6,8 +6,9 @@ env: on: pull_request: paths: + - .github/workflows/benchmarks.yaml - .github/workflows/python-tests.yaml - - "**/*.py" + - "src/prefect/**/*.py" - requirements.txt - requirements-dev.txt - setup.cfg @@ -28,36 +29,45 @@ jobs: name: Benchmark # Runs with ephemeral API and SQLite database right now - runs-on: ubuntu-latest + runs-on: + group: oss-larger-runners timeout-minutes: 20 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - name: Set up Docker Buildx if: ${{ matrix.build-docker-images }} - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 + with: + driver-opts: image=moby/buildkit:v0.12.5 - name: Set up Python 3.10 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - cache: "pip" - cache-dependency-path: "requirements*.txt" + + - name: UV Cache + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 
'requirements-dev.txt') }} + - name: Install packages run: | - python -m pip install --upgrade pip - pip install --upgrade --upgrade-strategy eager -e ".[dev]" + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] - name: Prepare benchmark comparisons # Note: We use a "cache" instead of artifacts because artifacts are not available # across workflow runs. id: bench-cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ./.benchmarks # Pushes benchmark results for this branch and sha, this will always be a cache miss @@ -68,7 +78,6 @@ jobs: ${{ runner.os }}-${{ github.base_ref }}- ${{ runner.os }}-main- - - name: Start server run: | prefect server start& @@ -78,12 +87,25 @@ jobs: # https://github.com/PrefectHQ/prefect/issues/6990 - name: Run benchmarks + env: + HEAD_REF: ${{ github.head_ref }} + GITHUB_SHA: ${{ github.sha }} # Includes comparison to previous benchmarks if available - run: > - uniquename=${{ github.head_ref || 'main' }}-${{ github.sha }} + run: | + if [[ -z "$HEAD_REF" ]]; then + # HEAD_REF is unset or empty, use 'main' with the SHA + uniquename="main-$GITHUB_SHA" + else + # HEAD_REF is set, use the branch name directly + uniquename="$HEAD_REF" + fi + + # Allow alphanumeric, underscores, and dashes, and replace other + # characters with an underscore + sanitized_uniquename="${uniquename//[^a-zA-Z0-9_\-]/_}" PREFECT_API_URL="http://127.0.0.1:4200/api" - python benches - --benchmark-save="${uniquename//\//_}" - ${{ steps.bench-cache.outputs.cache-hit && '--benchmark-compare' || ''}} - + python benches \ + --timeout=180 \ + --benchmark-save="${sanitized_uniquename}" \ + ${{ steps.bench-cache.outputs.cache-hit && '--benchmark-compare' || '' }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index aafd2f7fd2fe..1a7fff3ad090 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -39,7 +39,7 @@ on: # # https://docs.github.com/en/actions/using-jobs/using-concurrency concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} # Do not grant jobs any permissions by default @@ -57,7 +57,7 @@ jobs: security-events: write strategy: - fail-fast: false + fail-fast: true matrix: language: - javascript @@ -65,15 +65,14 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} config-file: ./.github/codeql-config.yml - queries: security-and-quality - setup-python-dependencies: false + queries: security-extended - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/create-tags-for-changed-integrations.yaml b/.github/workflows/create-tags-for-changed-integrations.yaml new file mode 100644 index 000000000000..e24f862283f6 --- /dev/null +++ b/.github/workflows/create-tags-for-changed-integrations.yaml @@ -0,0 +1,38 @@ +name: Create tags for changed integrations + +on: + release: + types: [published] + workflow_dispatch: + +jobs: + create-integrations-tags: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install 
dependencies + run: | + python -m pip install --upgrade pip + pip install -U packaging gh-util + + - name: Get previous tag + id: get-previous-tag + run: echo "tag=$(git tag --list '[0-9]*' --sort=-version:refname | grep -v '2.82' | head -n 2 | tail -n 1)" >> $GITHUB_OUTPUT + + - name: Push tags for changed integrations + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PREVIOUS_TAG: ${{ steps.get-previous-tag.outputs.tag }} + CURRENT_COMMIT: ${{ github.sha }} + + run: python scripts/create_tags_for_changed_collections.py \ No newline at end of file diff --git a/.github/workflows/docker-images.yaml b/.github/workflows/docker-images.yaml index 9ccc58097d77..a53b6662f357 100644 --- a/.github/workflows/docker-images.yaml +++ b/.github/workflows/docker-images.yaml @@ -1,7 +1,5 @@ name: Docker images -# Note: Conda support for 3.11 is pending. See https://github.com/ContinuumIO/anaconda-issues/issues/13082 - on: # On release events (also when a published release is converted from/to prerelease), push all patterns release: @@ -9,6 +7,22 @@ on: # On each commit merged into main, push sha and branch patterns to prefect-dev push: branches: main + paths: + - "Dockerfile" + - ".dockerignore" + - "setup.py" + - "src/**" + - "tests/**" + - "requirements.txt" + - "requirements-client.txt" + - "MANIFEST.in" + - "setup.cfg" + - "versioneer.py" + - ".gitignore" + - ".gitattributes" + - ".github/workflows/docker-images.yaml" + - "ui/**" + # On workflow_dispatch, push sha and branch patterns to prefect-dev workflow_dispatch: @@ -24,37 +38,34 @@ jobs: - "-conda" - "-kubernetes" python-version: - - "3.7" - - "3.8" - "3.9" - "3.10" - "3.11" - exclude: - # Not yet supported, see note at top - - flavor: "-conda" - python-version: "3.11" + - "3.12" steps: - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 + with: + driver-opts: image=moby/buildkit:v0.12.5 - name: Login to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Generate tags for prefecthq/prefect-dev id: metadata-dev - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 # do not generate the development tags on release events if: ${{ github.event_name != 'release' }} with: @@ -77,7 +88,7 @@ jobs: - name: Generate tags for prefecthq/prefect id: metadata-prod - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 # only generate the production tags on release events if: ${{ github.event_name == 'release' }} with: @@ -88,14 +99,12 @@ jobs: type=pep440,pattern={{version}},suffix=-python${{ matrix.python-version }}${{ matrix.flavor }} type=pep440,pattern={{major}}.{{minor}},suffix=-python${{ matrix.python-version }}${{ matrix.flavor }},enable=${{ github.event.release.prerelease == false }} type=pep440,pattern={{major}},suffix=-python${{ matrix.python-version }}${{ matrix.flavor }},enable=${{ github.event.release.prerelease == false && github.ref_name == env.LATEST_TAG }} - type=raw,value=2-latest${{ matrix.flavor }},enable=${{ matrix.python-version == '3.10' && github.event.release.prerelease == false && github.ref_name == env.LATEST_TAG }} + type=raw,value=3-latest${{ matrix.flavor }},enable=${{ matrix.python-version == '3.10' && github.event.release.prerelease == false &&
github.ref_name == env.LATEST_TAG }} flavor: | latest=false - name: Build and push image - uses: docker/build-push-action@v4 - env: - CACHE_TO_STRING: type=registry,ref=prefecthq/prefect-dev:buildcache-python${{ matrix.python-version }}${{ matrix.flavor }},mode=max + uses: docker/build-push-action@v5 with: context: . platforms: linux/amd64,linux/arm64 @@ -108,6 +117,3 @@ jobs: push: true pull: true provenance: false - # multi-stage cache manifests (mode=max) need a separate tag because they are not usable for runtime ref https://github.com/moby/buildkit#export-cache - cache-from: type=registry,ref=prefecthq/prefect-dev:buildcache-python${{ matrix.python-version }}${{ matrix.flavor }} - cache-to: ${{ ( github.event_name == 'push' && env.CACHE_TO_STRING ) || '' }} diff --git a/.github/workflows/helm-chart-release.yaml b/.github/workflows/helm-chart-release.yaml index b9ef327e9603..3e90af85f77a 100644 --- a/.github/workflows/helm-chart-release.yaml +++ b/.github/workflows/helm-chart-release.yaml @@ -10,15 +10,10 @@ jobs: create_helm_release: runs-on: ubuntu-latest steps: - - name: Get today's formatted date - id: get_date - run: echo "value=$(date +'%Y.%m.%d')" >> $GITHUB_OUTPUT - name: Create prefect-helm release run: | - gh release create ${{ steps.get_date.outputs.value }} \ + gh workflow run helm-release.yaml \ --repo PrefectHQ/prefect-helm \ - --generate-notes \ - --notes "Packaged with Prefect version \ - [${{ github.ref_name }}](https://github.com/PrefectHQ/prefect/releases/tag/${{ github.ref_name }})" + --ref main env: - GH_TOKEN: ${{ secrets.PREFECT_HELM_CONTENTS_RW }} + GH_TOKEN: ${{ secrets.PREFECT_HELM_ACTIONS_RW }} diff --git a/.github/workflows/integration-package-release.yaml b/.github/workflows/integration-package-release.yaml new file mode 100644 index 000000000000..4fe8845c4d61 --- /dev/null +++ b/.github/workflows/integration-package-release.yaml @@ -0,0 +1,71 @@ +name: Publish integration package to PyPI + +on: + push: + tags: + - "prefect-\\w+-[0-9]+.*" + +jobs: + build-pypi-dists: + name: Build Python package + runs-on: ubuntu-latest + outputs: + PACKAGE_NAME: ${{ steps.package_name.outputs.PACKAGE_NAME }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.commit }} + fetch-depth: 0 + persist-credentials: false + + - name: Extract package name + id: package_name + run: | + TAG_NAME=${{ github.ref_name }} + PACKAGE_NAME=$(echo $TAG_NAME | sed 's/-[0-9][.0-9]*rc.*$//') + echo "PACKAGE_NAME=$PACKAGE_NAME" >> $GITHUB_OUTPUT + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + cache: "pip" + cache-dependency-path: "requirements*.txt" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install --upgrade build + working-directory: src/integrations/${{ steps.package_name.outputs.PACKAGE_NAME }} + + - name: Build a binary wheel and a source tarball + run: | + python -m build --wheel + python -m build --sdist + working-directory: src/integrations/${{ steps.package_name.outputs.PACKAGE_NAME }} + + - name: Publish build artifacts + uses: actions/upload-artifact@v4 + with: + name: ${{ steps.package_name.outputs.PACKAGE_NAME }}-pypi-dists + path: "./src/integrations/${{ steps.package_name.outputs.PACKAGE_NAME }}/dist" + + publish-pypi-dists: + name: Publish to PyPI + environment: "prod" + needs: build-pypi-dists + runs-on: ubuntu-latest + permissions: + # this permission is mandatory for trusted publishing + id-token: write + + steps: + - name: Download build 
artifacts + uses: actions/download-artifact@v4 + with: + name: ${{ needs.build-pypi-dists.outputs.PACKAGE_NAME}}-pypi-dists + path: "./dist" + + - name: Publish distribution to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/integration-package-tests.yaml b/.github/workflows/integration-package-tests.yaml new file mode 100644 index 000000000000..9baf1ba082b3 --- /dev/null +++ b/.github/workflows/integration-package-tests.yaml @@ -0,0 +1,107 @@ +name: Integrations Packages Tests + +on: + pull_request: + paths: + - .github/workflows/integration-package-tests.yaml + - "src/**/*.py" + types: [opened, reopened, synchronize, labeled, unlabeled] + push: + branches: + - main + paths: + - "src/integrations/*/**.py" + +jobs: + prepare-matrix: + # These tests will only run if the integration paths are affected, or someone has + # added the `integrations` label to the PR + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + - name: Generate matrix + id: set-matrix + run: | + if [[ "${{ contains(github.event.pull_request.labels.*.name, 'test-all-integrations') }}" == 'true' ]]; then + # All of the integration packages were changed since 2.19.2, so this is a + # standin for saying "all integrations" + COMMIT_RANGE="2.19.2..${{ github.event.pull_request.head.sha }}" + elif [[ $GITHUB_EVENT_NAME == 'pull_request' ]]; then + COMMIT_RANGE="${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}" + else + COMMIT_RANGE="${{ github.event.before }}..${{ github.event.after }}" + fi + python scripts/generate_integration_package_tests_matrix.py "$COMMIT_RANGE" > matrix.json + cat matrix.json + echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT + + run-tests: + timeout-minutes: 20 + + name: Run Tests for ${{ matrix.package }} on Python ${{ matrix.python-version }} + needs: prepare-matrix + runs-on: ubuntu-latest + strategy: + matrix: ${{fromJson(needs.prepare-matrix.outputs.matrix)}} + fail-fast: false + steps: + - name: Display current test matrix + run: echo '${{ toJSON(matrix) }}' + + - uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + id: setup_python + with: + python-version: ${{ matrix.python-version }} + + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles(format('src/integrations/{0}/pyproject.toml', matrix.package)) }} + + - name: Install dependencies + working-directory: src/integrations/${{ matrix.package }} + # install uv, the package, and bleeding edge prefect + run: | + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] + uv pip install --upgrade --system ../../../ + + + - name: Run tests + if: matrix.package != 'prefect-ray' + env: + PREFECT_API_DATABASE_CONNECTION_URL: "sqlite+aiosqlite:///./orion-tests.db" + working-directory: src/integrations/${{ matrix.package }} + run: > + pytest tests + --numprocesses auto + --maxprocesses 6 + --dist worksteal + + # Run prefect-ray tests sequentially to avoid Ray cluster issues + - name: Run tests for 
prefect-ray + if: matrix.package == 'prefect-ray' + env: + PREFECT_API_DATABASE_CONNECTION_URL: "sqlite+aiosqlite:///./orion-tests.db" + working-directory: src/integrations/${{ matrix.package }} + run: > + pytest tests diff --git a/.github/workflows/integration-tests.yaml b/.github/workflows/integration-tests.yaml index 11fe5fad515a..e53d2180ec65 100644 --- a/.github/workflows/integration-tests.yaml +++ b/.github/workflows/integration-tests.yaml @@ -3,199 +3,115 @@ on: pull_request: paths: - .github/workflows/integration-tests.yaml - - "**/*.py" + - "src/prefect/**/*.py" - requirements.txt + - requirements-client.txt - requirements-dev.txt - ui/** - .nvmrc - Dockerfile + - flows/ push: branches: - main + paths: + - .github/workflows/integration-tests.yaml + - "**/*.py" + - requirements.txt + - requirements-client.txt + - requirements-dev.txt + - ui/** + - .nvmrc + - Dockerfile jobs: - playwright-tests: - # disabling these tests for now since they are broken because of pixi-viewport - if: ${{ false }} - name: "Test UI" - timeout-minutes: 20 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Python 3.10 - uses: actions/setup-python@v4 - with: - python-version: "3.10" - cache: "pip" - cache-dependency-path: "requirements*.txt" - - - name: Install python packages - run: | - python -m pip install --upgrade pip - pip install --upgrade --upgrade-strategy eager -e .[dev] - - - name: Build UI - run: | - prefect dev build-ui - - - name: Run server in background - run: | - prefect server start& - - - name: Set up node - uses: actions/setup-node@v3 - with: - node-version-file: ".nvmrc" - cache-dependency-path: "**/package-lock.json" - - - name: Retrieve Playwright version - id: playwright-cache-key - run: | - echo "version=$(npx playwright -V)" >> $GITHUB_OUTPUT - - - name: Retrieve cached Playwright browsers - id: cache-playwright-browsers - uses: actions/cache@v3 - with: - path: ~/.cache/ms-playwright - key: ${{ steps.playwright-cache-key.outputs.version }}-playwright-browsers - - - name: Install Playwright browsers - working-directory: ./ui - run: npx playwright install --with-deps - - - name: Run Playwright tests - working-directory: ./ui - run: npx playwright test - - - uses: actions/upload-artifact@v3 - if: always() - with: - name: playwright-report - path: ./ui/playwright-report/ - retention-days: 30 - compatibility-tests: - name: "Check compatibility with Prefect ${{ matrix.prefect-version }}" + name: Integration tests @${{ matrix.server-version.version }} timeout-minutes: 10 + runs-on: ubuntu-latest strategy: + fail-fast: false matrix: - prefect-version: + server-version: [ # These versions correspond to Prefect image tags, the patch version is - # excluded to always pull the latest patch of each minor version. - - "2.0" - - "2.1" - - "2.2" - - "2.3" - - "2.4" - - "2.5" - - "2.6" - - "2.7" - - "2.8" - - "2.9" - - "2.10" - - # We can include the following to always test against the last release - # but the value is not particularly clear and we can just append the - # last minor version at each release time - # - "2" - - include: - # While old clients should always be supported by new servers, a new - # client may send data that an old server does not support. These - # incompatibilities are allowed. 
- - # All servers prior to 2.6.0 will not accept 2.6.0+ result types - - prefect-version: "2.0" - server-incompatible: true - - prefect-version: "2.1" - server-incompatible: true - - prefect-version: "2.2" - server-incompatible: true - - prefect-version: "2.3" - server-incompatible: true - - prefect-version: "2.4" - server-incompatible: true - - prefect-version: "2.5" - server-incompatible: true - - # 2.6 containers have a bad version of httpcore installed - - prefect-version: "2.6" - extra_docker_run_options: '--env EXTRA_PIP_PACKAGES="httpcore>=0.16.2"' - server_command: "prefect orion start" - - # 2.6/2.7 require `prefect orion start` instead of prefect server start - - prefect-version: "2.7" - server_command: "prefect orion start" + # excluded to always pull the latest patch of each minor version. The ref + # should generally be set to the latest patch release for that version. + {version: "2.19", ref: "2.19.2", image: "prefecthq/prefect:2.19-python3.10"}, + {version: "main", ref: "main"}, + ] - fail-fast: false - - runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - name: Set up Python 3.10 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 + id: setup_python with: python-version: "3.10" - cache: "pip" - cache-dependency-path: "requirements*.txt" + + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }} - name: Install python packages run: | - python -m pip install --upgrade pip - pip install --upgrade --upgrade-strategy eager -e .[dev] - - - name: Start server@${{ matrix.prefect-version }} - if: ${{ ! matrix.server-incompatible }} + python -m pip install -U uv + uv pip install --upgrade --system . + + - name: Start server@${{ matrix.server-version.version }} + if: ${{ matrix.server-version.version != 'main' }} + env: + PREFECT_API_URL: http://127.0.0.1:4200/api + PREFECT_LOGGING_SERVER_LEVEL: DEBUG + PREFECT_EXPERIMENTAL_EVENTS: "true" run: > - docker run - --name "prefect-server-${{ matrix.prefect-version }}" - --detach - --publish 4200:4200 - ${{ matrix.extra_docker_run_options }} - prefecthq/prefect:${{ matrix.prefect-version }}-python3.10 - ${{ matrix.server_command || 'prefect server start' }} --host 0.0.0.0 + docker run \ + --name "prefect-server" \ + -d \ + -p 4200:4200 \ + -e PREFECT_API_URL=${{ env.PREFECT_API_URL }} \ + -e PREFECT_LOGGING_SERVER_LEVEL=${{ env.PREFECT_LOGGING_SERVER_LEVEL }} \ + -e PREFECT_EXPERIMENTAL_EVENTS=${{ env.PREFECT_EXPERIMENTAL_EVENTS }} \ + ${{ matrix.server-version.image }} \ + prefect server start --analytics-off --host 0.0.0.0 - PREFECT_API_URL="http://127.0.0.1:4200/api" ./scripts/wait-for-server.py # TODO: Replace `wait-for-server` with dedicated command # https://github.com/PrefectHQ/prefect/issues/6990 - - name: Run integration flows with client@dev, server@${{ matrix.prefect-version }} - if: ${{ ! 
matrix.server-incompatible }} + - name: Start server + if: ${{ matrix.server-version.version == 'main' }} + env: + PREFECT_API_URL: http://127.0.0.1:4200/api + PREFECT_LOGGING_SERVER_LEVEL: DEBUG run: > - TEST_SERVER_VERSION=${{ matrix.prefect-version }} - PREFECT_API_URL="http://127.0.0.1:4200/api" - TEST_CLIENT_VERSION=$(python -c 'import prefect; print(prefect.__version__)') - ./scripts/run-integration-flows.py + prefect server start --analytics-off --host 0.0.0.0 2>&1 > server.log & - - name: Start server@dev - run: | - # First, we must stop the server container if it exists - # TODO: Once we have `prefect server stop` we can run these tests first and the - # optional tests above second - # https://github.com/PrefectHQ/prefect/issues/6989 - docker stop "prefect-server-${{ matrix.prefect-version }}" || echo "That's okay!" + ./scripts/wait-for-server.py - prefect server start& - PREFECT_API_URL="http://127.0.0.1:4200/api" ./scripts/wait-for-server.py + # TODO: Replace `wait-for-server` with dedicated command + # https://github.com/PrefectHQ/prefect/issues/6990 - - name: Run integration flows with client@${{ matrix.prefect-version }}, server@dev + - name: Run integration flows + env: + PREFECT_API_URL: http://127.0.0.1:4200/api + SERVER_VERSION: ${{ matrix.server-version.version }} run: > - docker run - --env PREFECT_API_URL="http://127.0.0.1:4200/api" - --env TEST_SERVER_VERSION=$(python -c 'import prefect; print(prefect.__version__)') - --env TEST_CLIENT_VERSION=${{ matrix.client_version }} - --volume $(pwd)/flows:/opt/prefect/integration/flows - --volume $(pwd)/scripts:/opt/prefect/integration/scripts - --network host - ${{ matrix.extra_docker_run_options }} - prefecthq/prefect:${{ matrix.prefect-version }}-python3.10 - /opt/prefect/integration/scripts/run-integration-flows.py /opt/prefect/integration/flows + ./scripts/run-integration-flows.py flows/ + + - name: Show server logs + if: always() + run: | + cat server.log || echo "No logs available" + docker logs prefect-server || echo "No logs available" diff --git a/.github/workflows/issue-bot.yaml b/.github/workflows/issue-bot.yaml index 5568383612bf..2a3edfa577c9 100644 --- a/.github/workflows/issue-bot.yaml +++ b/.github/workflows/issue-bot.yaml @@ -10,24 +10,6 @@ jobs: steps: - name: Remove status labels on close if: github.event.action == 'closed' - run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --remove-label "status:triage" --remove-label "status:in-progress" --remove-label "status:accepted" - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Update issue status when assigned - if: github.event.action == 'assigned' - run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --add-label "status:in-progress" - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Update issue status when unassigned - if: github.event.action == 'unassigned' - run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --remove-label "status:in-progress" - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Remove triage label on status change - if: github.event.action == 'labeled' && startsWith(github.event.label.name, 'status:') && github.event.label.name != 'status:triage' - run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --remove-label "status:triage" + run: gh issue edit --repo prefecthq/prefect ${{ github.event.issue.number }} --remove-label "needs:triage" --remove-label "needs:attention" env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff 
--git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 3633380ec31b..a011e431245e 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -12,6 +12,6 @@ jobs: contents: read pull-requests: write steps: - - uses: actions/labeler@v4 + - uses: actions/labeler@v5 with: repo-token: "${{ github.token }}" diff --git a/.github/workflows/npm_update_latest_prefect.yaml b/.github/workflows/npm_update_latest_prefect.yaml new file mode 100644 index 000000000000..7fca41747bc1 --- /dev/null +++ b/.github/workflows/npm_update_latest_prefect.yaml @@ -0,0 +1,66 @@ +--- + name: Update to latest Prefect Packages + on: + workflow_dispatch: + inputs: + package_name: + description: The name of the Prefect package to update + required: true + package_version: + description: The version of the Prefect package to update + required: true + permissions: {} + + jobs: + update_prefect_packages: + runs-on: ubuntu-latest + permissions: + # required to write to the repo + contents: write + steps: + - uses: actions/checkout@v4 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Create Branch For Dependency Version Updates + run: git checkout -b "prefect-package-${{ inputs.package_name }}-${{ inputs.package_version }}-update" + + - name: Setup NodeJS + uses: actions/setup-node@v4 + with: + node-version-file: ".nvmrc" + cache-dependency-path: "**/package-lock.json" + + - name: Install Dependencies + run: npm ci + working-directory: ./ui + + - name: Upgrade Package + run: | + npm i @${{ inputs.package_name }}@${{ inputs.package_version }} --save-exact + working-directory: ./ui + + - name: Commit Package Changes + run: | + git add . + git commit -m "Update @${{ inputs.package_name }} to version ${{ inputs.package_version }}" + git push --set-upstream origin "prefect-package-${{ inputs.package_name }}-${{ inputs.package_version }}-update" + env: + GITHUB_TOKEN: ${{ github.token }} + + - name: Create Pull Request + run: | + git checkout "prefect-package-${{ inputs.package_name }}-${{ inputs.package_version }}-update" + gh pr create \ + --base main \ + --title "Update @${{ inputs.package_name }} to version ${{ inputs.package_version }}" \ + --body "Update @${{ inputs.package_name }} to version ${{ inputs.package_version }}. + Release information can be found at https://github.com/${{ inputs.package_name }}/releases/tag/${{ inputs.package_version }}." 
\ + --label maintenance \ + --label ui + env: + GITHUB_TOKEN: ${{ secrets.PREFECT_CONTENTS_PR_RW }} + \ No newline at end of file diff --git a/.github/workflows/prefect-client-publish.yaml b/.github/workflows/prefect-client-publish.yaml new file mode 100644 index 000000000000..df3d30730f98 --- /dev/null +++ b/.github/workflows/prefect-client-publish.yaml @@ -0,0 +1,33 @@ + +name: Build and publish the prefect-client + +on: + release: + types: [released, prereleased] + +jobs: + verify-prefect-client-build: + uses: ./.github/workflows/prefect-client.yaml + with: + upload-artifacts: true + artifact-name: "prefect-client-pypi-dists" + secrets: inherit + + publish-prefect-client-pypi-dists: + name: Publish to PyPI + environment: "prod" + needs: [verify-prefect-client-build] + runs-on: ubuntu-latest + + steps: + - name: Download build artifacts + uses: actions/download-artifact@v4 + with: + name: "prefect-client-pypi-dists" + path: "./dist" + + - name: Publish distribution to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + password: ${{ secrets.PREFECT_CLIENT_PYPI_API_TOKEN }} + name: ci diff --git a/.github/workflows/prefect-client.yaml b/.github/workflows/prefect-client.yaml new file mode 100644 index 000000000000..fbeda01bd71f --- /dev/null +++ b/.github/workflows/prefect-client.yaml @@ -0,0 +1,92 @@ +name: Verify prefect-client build + +on: + pull_request: + branches: + - main + paths: + - client/* + - src/prefect/**/*.py + - requirements.txt + - requirements-client.txt + - setup.cfg + push: + branches: + - main + paths: + - client/* + - src/prefect/**/*.py + - requirements.txt + - requirements-client.txt + - setup.cfg + workflow_call: + inputs: + upload-artifacts: + description: "Whether or not to upload artifacts created in this workflow" + default: false + type: boolean + artifact-name: + description: "The name for the build prefect-client artifact" + default: "prefect-client-pypi-dists" + type: string + +jobs: + prefect-client-smoke-test: + name: Build and run prefect-client + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + # Versioneer only generates correct versions with a full fetch + fetch-depth: 0 + persist-credentials: false + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.9" + cache: "pip" + cache-dependency-path: "requirements-client.txt" + + - name: Create a temp dir to stage our build + run: echo "TMPDIR=$(mktemp -d)" >> $GITHUB_ENV + + - name: Prepare files for prefect-client build (omit the local build) + run: sh client/build_client.sh + env: + TMPDIR: ${{ env.TMPDIR }} + + - name: Build a binary wheel and a source tarball + run: pip install wheel && python setup.py sdist bdist_wheel + working-directory: ${{ env.TMPDIR }} + + - name: Install the built client from the locally built package + run: pip install dist/*.tar.gz + working-directory: ${{ env.TMPDIR }} + + - name: Run the smoke test flow using the built client + run: python client/client_flow.py + working-directory: ${{ env.TMPDIR }} + env: + PREFECT_API_KEY: ${{ secrets.PREFECT_CLIENT_SA_API_KEY }} + PREFECT_API_URL: "https://api.prefect.cloud/api/accounts/9b649228-0419-40e1-9e0d-44954b5c0ab6/workspaces/96bd3cf8-85c9-4545-9713-b4e3c3e03466" # sandbox, prefect-client workspace + + - name: Install prefect from source + run: pip install . 
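+ # Installing the full `prefect` package on top of `prefect-client` lets the remaining steps verify that the two distributions can be installed side by side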
+ + - name: (DEBUG) Check that prefect and prefect-client are installed + run: pip list | grep prefect + + - name: Run the smoke test flow again with prefect and prefect-client installed + run: python client/client_flow.py + working-directory: ${{ env.TMPDIR }} + env: + PREFECT_API_KEY: ${{ secrets.PREFECT_CLIENT_SA_API_KEY }} + PREFECT_API_URL: "https://api.prefect.cloud/api/accounts/9b649228-0419-40e1-9e0d-44954b5c0ab6/workspaces/96bd3cf8-85c9-4545-9713-b4e3c3e03466" # sandbox, prefect-client workspace + + - name: Publish build artifacts + if: ${{ inputs.upload-artifacts }} + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.artifact-name }} + path: "${{ env.TMPDIR }}/dist" diff --git a/.github/workflows/python-package.yaml b/.github/workflows/python-package.yaml index 33e15dff6e64..25e1d9146428 100644 --- a/.github/workflows/python-package.yaml +++ b/.github/workflows/python-package.yaml @@ -3,6 +3,12 @@ name: Publish Python package on: release: types: [released, prereleased] + workflow_dispatch: + inputs: + commit: + description: "Commit to build from" + required: true + default: "main" jobs: build-pypi-dists: @@ -11,27 +17,28 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: + ref: ${{ github.event.inputs.commit }} # Versioneer only generates correct versions with a full fetch fetch-depth: 0 persist-credentials: false - - name: Set up Python 3.8 - uses: actions/setup-python@v4 + - name: Set up Python + uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.9" cache: "pip" cache-dependency-path: "requirements*.txt" - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: ".nvmrc" cache-dependency-path: "**/package-lock.json" - name: Install python packages run: | - python -m pip install --upgrade pip + python -m pip install --upgrade pip wheel pip install --upgrade --upgrade-strategy eager -e .[dev] - name: Build UI @@ -43,7 +50,7 @@ jobs: python setup.py sdist bdist_wheel - name: Publish build artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: pypi-dists path: "./dist" @@ -56,7 +63,7 @@ jobs: steps: - name: Download build artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: pypi-dists path: "./dist" diff --git a/.github/workflows/python-tests.yaml b/.github/workflows/python-tests.yaml index ae5552327a11..59a22dba1c22 100644 --- a/.github/workflows/python-tests.yaml +++ b/.github/workflows/python-tests.yaml @@ -1,7 +1,5 @@ name: Unit tests -# Note: Conda support for 3.11 is pending. 
See https://github.com/ContinuumIO/anaconda-issues/issues/13082 - env: # enable colored output # https://github.com/pytest-dev/pytest/issues/7443 @@ -11,14 +9,25 @@ on: pull_request: paths: - .github/workflows/python-tests.yaml - - "**/*.py" + - "src/prefect/**/*.py" + - "tests/**/*.py" - requirements.txt + - requirements-client.txt - requirements-dev.txt - setup.cfg - Dockerfile push: branches: - main + paths: + - .github/workflows/python-tests.yaml + - "src/prefect/**/*.py" + - "tests/**/*.py" + - requirements.txt + - requirements-client.txt + - requirements-dev.txt + - setup.cfg + - Dockerfile permissions: contents: read @@ -33,135 +42,358 @@ permissions: # # https://docs.github.com/en/actions/using-jobs/using-concurrency concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: run-tests: - name: python:${{ matrix.python-version }}, ${{ matrix.database }}, ${{ matrix.pytest-options }} - + runs-on: + group: oss-larger-runners + name: python:${{ matrix.python-version }}, ${{ matrix.database }} strategy: matrix: database: - - "postgres:13" - "postgres:14" - "sqlite" - os: - - ubuntu-latest python-version: - - "3.7" - - "3.8" - "3.9" - "3.10" - "3.11" - pytest-options: - - "--exclude-services" - - "--only-services" + - "3.12" + + fail-fast: true + + timeout-minutes: 45 + + steps: + - name: Display current test matrix + run: echo '${{ toJSON(matrix) }}' + + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + id: setup_python + with: + python-version: ${{ matrix.python-version }} + + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }} + + - name: Install packages + run: | + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] + + - name: Start database container + if: ${{ startsWith(matrix.database, 'postgres') }} + run: > + docker run + --name "postgres" + --detach + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + --publish 5432:5432 + --tmpfs /var/lib/postgresql/data + --env POSTGRES_USER="prefect" + --env POSTGRES_PASSWORD="prefect" + --env POSTGRES_DB="prefect" + --env LANG="C.UTF-8" + --env LANGUAGE="C.UTF-8" + --env LC_ALL="C.UTF-8" + --env LC_COLLATE="C.UTF-8" + --env LC_CTYPE="C.UTF-8" + ${{ matrix.database }} + -c max_connections=250 - include: - # Run 3.7 tests with lower bound pins - - python-version: "3.7" - lower-bound-requirements: true + ./scripts/wait-for-healthy-container.sh postgres 30 - # Include Docker image builds on the service test run, and disallow the test - # suite from building images automaticlly in fixtures - - pytest-options: "--only-services" - build-docker-images: true + echo "PREFECT_API_DATABASE_CONNECTION_URL=postgresql+asyncpg://prefect:prefect@localhost/prefect" >> $GITHUB_ENV - exclude: - # Do not run service tests with postgres - - database: "postgres:13" - pytest-options: "--only-services" + # Parallelize tests by scope to reduce expensive service fixture duplication + # Do not allow the test suite 
to build images, as we want the prebuilt images to be tested + # Do not run Kubernetes service tests, we do not have a cluster available + # maxprocesses 6 is based on empirical testing; higher than 6 sees diminishing returns + - name: Run tests + env: + PREFECT_EXPERIMENTAL_ENABLE_PYDANTIC_V2_INTERNALS: "1" + run: > + pytest tests + --numprocesses auto + --maxprocesses 6 + --dist worksteal + --disable-docker-image-builds + --exclude-service kubernetes + --exclude-service docker + --durations 26 + --no-cov - # Do not run service tests with postgres - - database: "postgres:14" - pytest-options: "--only-services" + - name: Create and Upload failure flag + if: ${{ failure() }} + id: create_failure_flag + run: | + sanitized_name="${{ matrix.python-version }}-${{ matrix.database }}" + sanitized_name="${sanitized_name//:/-}" + echo "Failure in $sanitized_name" > "${sanitized_name}-failure.txt" + echo "artifact_name=${sanitized_name}-failure" >> $GITHUB_OUTPUT + - name: Upload failure flag + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ steps.create_failure_flag.outputs.artifact_name }} + path: "${{ steps.create_failure_flag.outputs.artifact_name }}.txt" - fail-fast: false + - name: Check database container + # Only applicable for Postgres, but we want this to run even when tests fail + if: always() + run: > + docker container inspect postgres + && docker container logs postgres + || echo "Ignoring bad exit code" + + notify-tests-failing-on-main: + needs: run-tests + if: github.ref == 'refs/heads/main' && failure() + runs-on: ubuntu-latest + env: + FAILURE_THRESHOLD: 1 + steps: + - name: Download all failure flags + uses: actions/download-artifact@v4 + with: + path: failure-flags/ + + - name: Check for failure flags + id: check_failure + run: | + failure_count=$(ls -1q failure-flags/*/*.txt | wc -l) + + if [ $failure_count -gt $FAILURE_THRESHOLD ]; then + too_many_tests_failed="true" + else + too_many_tests_failed="false" + fi + echo "failure_count=$failure_count" >> $GITHUB_OUTPUT + echo "too_many_tests_failed=$too_many_tests_failed" >> $GITHUB_OUTPUT + + - name: Send Slack Notification + if: ${{ steps.check_failure.outputs.too_many_tests_failed == 'true' }} + uses: 8398a7/action-slack@v3 + with: + author_name: Prefect OSS Tests Failing on Main + channel: CBH18KG8G # This is #engineering + fields: message,commit,author,workflowRun + status: failure + text: ":warning: Unit tests are failing in Prefect's main branch. Commit author: please either fix or remove the failing tests. If you remove the failing tests create a GitHub issue with the details." 
+ env: + SLACK_WEBHOOK_URL: ${{ secrets.ENGINEERING_REVIEW_SLACK_WEBHOOK_URL }} + + run-tests-for-datadog: + name: DataDog CI Visibility + if: github.event_name == 'push' && github.ref == 'refs/heads/main' + runs-on: + group: oss-larger-runners + strategy: + matrix: + database: + - "postgres:14" + python-version: + - "3.12" + + fail-fast: true - runs-on: ${{ matrix.os }} timeout-minutes: 45 steps: - name: Display current test matrix run: echo '${{ toJSON(matrix) }}' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - name: Set up Docker Buildx - if: ${{ matrix.build-docker-images }} - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 + with: + driver-opts: image=moby/buildkit:v0.12.5 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 + id: setup_python with: python-version: ${{ matrix.python-version }} - cache: "pip" - cache-dependency-path: "requirements*.txt" - - name: Pin requirements to lower bounds - if: ${{ matrix.lower-bound-requirements }} - # Creates lower bound files then replaces the input files so we can do a normal install + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }} + + - name: Get image tag + id: get_image_tag run: | - ./scripts/generate-lower-bounds.py requirements.txt > requirements-lower.txt - ./scripts/generate-lower-bounds.py requirements-dev.txt > requirements-dev-lower.txt - mv requirements-lower.txt requirements.txt - mv requirements-dev-lower.txt requirements-dev.txt + SHORT_SHA=$(git rev-parse --short=7 HEAD) + tmp="sha-$SHORT_SHA-python${{ matrix.python-version }}" + echo "image_tag=${tmp}" >> $GITHUB_OUTPUT - name: Build test image - if: ${{ matrix.build-docker-images }} - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: context: . 
- # TODO: We do not need the UI in these tests and we may want to add a build-arg to disable building it - # so that CI test runs are faster build-args: | PYTHON_VERSION=${{ matrix.python-version }} PREFECT_EXTRAS=[dev] - tags: prefecthq/prefect:dev-python${{ matrix.python-version }} + tags: prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} outputs: type=docker,dest=/tmp/image.tar - cache-from: type=gha - cache-to: type=gha,mode=max - name: Test Docker image - if: ${{ matrix.build-docker-images }} run: | docker load --input /tmp/image.tar - docker run --rm prefecthq/prefect:dev-python${{ matrix.python-version }} prefect version + docker run --rm prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} prefect version + + - name: Install packages + run: | + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] - - name: Build Conda flavored test image - # Not yet supported for 3.11, see note at top - if: ${{ matrix.build-docker-images && matrix.python-version != '3.11' }} - uses: docker/build-push-action@v4 + - name: Start database container + run: > + docker run + --name "postgres" + --detach + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + --publish 5432:5432 + --tmpfs /var/lib/postgresql/data + --env POSTGRES_USER="prefect" + --env POSTGRES_PASSWORD="prefect" + --env POSTGRES_DB="prefect" + --env LANG="C.UTF-8" + --env LANGUAGE="C.UTF-8" + --env LC_ALL="C.UTF-8" + --env LC_COLLATE="C.UTF-8" + --env LC_CTYPE="C.UTF-8" + ${{ matrix.database }} + -c max_connections=250 + + ./scripts/wait-for-healthy-container.sh postgres 30 + + echo "PREFECT_API_DATABASE_CONNECTION_URL=postgresql+asyncpg://prefect:prefect@localhost/prefect" >> $GITHUB_ENV + + - name: Run tests + env: + PREFECT_EXPERIMENTAL_ENABLE_PYDANTIC_V2_INTERNALS: "1" + DD_CIVISIBILITY_AGENTLESS_ENABLED: true + DD_API_KEY: ${{ secrets.DD_API_KEY_CI_VISIBILITY }} + DD_SITE: datadoghq.com + DD_ENV: ci + DD_SERVICE: prefect + run: > + pytest tests + --numprocesses auto + --maxprocesses 6 + --ddtrace + --dist worksteal + --disable-docker-image-builds + --exclude-service kubernetes + --durations 26 + --cov + --cov-config setup.cfg + + run-docker-tests: + runs-on: + group: oss-larger-runners + name: docker, python:${{ matrix.python-version }} + strategy: + matrix: + database: + - "postgres:14" + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + + fail-fast: true + + timeout-minutes: 45 + + steps: + - name: Display current test matrix + run: echo '${{ toJSON(matrix) }}' + + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: image=moby/buildkit:v0.12.5 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + id: setup_python + with: + python-version: ${{ matrix.python-version }} + + - name: UV Cache + # Manually cache the uv cache directory + # until setup-python supports it: + # https://github.com/actions/setup-python/issues/822 + uses: actions/cache@v4 + id: cache-uv + with: + path: ~/.cache/uv + key: uvcache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('requirements-client.txt', 'requirements.txt', 'requirements-dev.txt') }} + + - name: Get image tag + id: get_image_tag + run: | + SHORT_SHA=$(git rev-parse --short=7 HEAD) + tmp="sha-$SHORT_SHA-python${{ matrix.python-version }}" + echo "image_tag=${tmp}" >> $GITHUB_OUTPUT + + - name: Build test image 
+ uses: docker/build-push-action@v5 with: context: . build-args: | PYTHON_VERSION=${{ matrix.python-version }} - BASE_IMAGE=prefect-conda PREFECT_EXTRAS=[dev] - tags: prefecthq/prefect:dev-python${{ matrix.python-version }}-conda - outputs: type=docker,dest=/tmp/image-conda.tar - cache-from: type=gha - # We do not cache Conda image layers because they very big and slow to upload - # cache-to: type=gha,mode=max - - - name: Test Conda flavored Docker image - # Not yet supported for 3.11, see note at top - if: ${{ matrix.build-docker-images && matrix.python-version != '3.11' }} + tags: prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} + outputs: type=docker,dest=/tmp/image.tar + + - name: Test Docker image run: | - docker load --input /tmp/image-conda.tar - docker run --rm prefecthq/prefect:dev-python${{ matrix.python-version }}-conda prefect version - docker run --rm prefecthq/prefect:dev-python${{ matrix.python-version }}-conda conda --version + docker load --input /tmp/image.tar + docker run --rm prefecthq/prefect-dev:${{ steps.get_image_tag.outputs.image_tag }} prefect version - name: Install packages run: | - python -m pip install pip==23.0.1 - # If using not using lower bounds, upgrade eagerly to get the latest versions despite caching - pip install ${{ ! matrix.lower-bound-requirements && '--upgrade --upgrade-strategy eager' || ''}} -e .[dev] + python -m pip install -U uv + uv pip install --upgrade --system -e .[dev] - name: Start database container if: ${{ startsWith(matrix.database, 'postgres') }} @@ -184,17 +416,52 @@ jobs: --env LC_COLLATE="C.UTF-8" --env LC_CTYPE="C.UTF-8" ${{ matrix.database }} + -c max_connections=250 ./scripts/wait-for-healthy-container.sh postgres 30 echo "PREFECT_API_DATABASE_CONNECTION_URL=postgresql+asyncpg://prefect:prefect@localhost/prefect" >> $GITHUB_ENV + - name: Start docker registry + run: > + docker run + --name "prefect-test-registry" + --detach + --publish 5555:5000 + registry:2 + + # Parallelize tests by scope to reduce expensive service fixture duplication + # Do not allow the test suite to build images, as we want the prebuilt images to be tested + # Do not run Kubernetes service tests, we do not have a cluster available + # maxprocesses 6 is based on empirical testing; higher than 6 sees diminishing returns - name: Run tests + env: + PREFECT_EXPERIMENTAL_ENABLE_PYDANTIC_V2_INTERNALS: "1" + run: > + pytest tests + --numprocesses auto + --maxprocesses 6 + --dist worksteal + --disable-docker-image-builds + --only-service docker + --durations 26 + --no-cov + + - name: Create and Upload failure flag + if: ${{ failure() }} + id: create_failure_flag run: | - # Parallelize tests by scope to reduce expensive service fixture duplication - # Do not allow the test suite to build images, as we want the prebuilt images to be tested - # Do not run Kubernetes service tests, we do not have a cluster available - pytest tests -vvv --numprocesses auto --dist loadscope --disable-docker-image-builds --exclude-service kubernetes --durations=25 --cov=src/ --cov=tests/ --no-cov-on-fail --cov-report=term --cov-config=setup.cfg ${{ matrix.pytest-options }} + sanitized_name="${{ matrix.python-version }}-${{ matrix.database }}" + sanitized_name="${sanitized_name//:/-}" + echo "Failure in $sanitized_name" > "${sanitized_name}-failure.txt" + echo "artifact_name=${sanitized_name}-failure" >> $GITHUB_OUTPUT + + - name: Upload failure flag + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: ${{ 
steps.create_failure_flag.outputs.artifact_name }} + path: "${{ steps.create_failure_flag.outputs.artifact_name }}.txt" - name: Check database container # Only applicable for Postgres, but we want this to run even when tests fail @@ -202,4 +469,4 @@ jobs: run: > docker container inspect postgres && docker container logs postgres - || echo "Ignoring bad exit code" \ No newline at end of file + || echo "Ignoring bad exit code" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index dc16de73b9e2..52745a4ee7f0 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -name: "Close stale issues and PRs" +name: "Close stale PRs" on: schedule: - cron: "0 * * * *" @@ -7,17 +7,15 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: - stale-issue-message: "This issue is stale because it has been open 30 days with no activity. To keep this issue open remove stale label or comment." - stale-issue-label: "status:stale" - close-issue-message: "This issue was closed because it has been stale for 14 days with no activity. If this issue is important or you have more to add feel free to re-open it." - days-before-stale: 30 - stale-pr-message: "This pull request is stale because it has been open 60 days with no activity. To keep this pull request open remove stale label or comment." + days-before-stale: -1 + days-before-close: -1 + stale-pr-message: "This pull request is stale because it has been open 14 days with no activity. To keep this pull request open remove stale label or comment." stale-pr-label: "status:stale" close-pr-message: "This pull request was closed because it has been stale for 14 days with no activity. If this pull request is important or you have more to add feel free to re-open it." 
- days-before-pr-stale: 60 - days-before-close: 14 - exempt-issue-labels: "status:in-progress,status:roadmap,status:accepted" + days-before-pr-stale: 14 + days-before-pr-close: 14 + exempt-issue-labels: "needs:attention,needs:triage,blocked" ascending: true # https://github.com/actions/stale#ascending operations-per-run: 500 diff --git a/.github/workflows/static-analysis.yaml b/.github/workflows/static-analysis.yaml index bb0cbc1c37d8..e91a12f721e4 100644 --- a/.github/workflows/static-analysis.yaml +++ b/.github/workflows/static-analysis.yaml @@ -15,7 +15,7 @@ permissions: # # https://docs.github.com/en/actions/using-jobs/using-concurrency concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: @@ -24,12 +24,12 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: persist-credentials: false - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.9 diff --git a/.github/workflows/ui-tests.yml b/.github/workflows/ui-tests.yml index 4c903abd3fb1..0fc03802812d 100644 --- a/.github/workflows/ui-tests.yml +++ b/.github/workflows/ui-tests.yml @@ -23,7 +23,7 @@ permissions: # # https://docs.github.com/en/actions/using-jobs/using-concurrency concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: ${{ github.event_name == 'pull_request' }} jobs: @@ -32,9 +32,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version-file: '.nvmrc' cache-dependency-path: '**/package-lock.json' diff --git a/.github/workflows/validate_worker_metadata.yaml b/.github/workflows/validate_worker_metadata.yaml new file mode 100644 index 000000000000..7ac18ef25573 --- /dev/null +++ b/.github/workflows/validate_worker_metadata.yaml @@ -0,0 +1,28 @@ +name: Ensure JSON views are valid on PR +on: + pull_request: + branches: + - main + paths: + - "src/prefect/server/api/collections_data/views/*.json" +jobs: + submit-update-pr: + name: Run JSON schema validation against all views + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install fastjsonschema==2.19.1 + + - name: Run JSON schema validation + run: | + python scripts/validate_collection_view_content.py diff --git a/.github/workflows/windows-pull-request.yaml b/.github/workflows/windows-pull-request.yaml new file mode 100644 index 000000000000..1871328bc009 --- /dev/null +++ b/.github/workflows/windows-pull-request.yaml @@ -0,0 +1,76 @@ +name: Windows tests (Pull Request) + +on: + pull_request: + branches: + - main + types: + - opened + - reopened + - synchronize + - labeled + - unlabeled + +permissions: {} + +# Limit concurrency by workflow/branch combination. +# +# For pull request builds, pushing additional changes to the +# branch will cancel prior in-progress and pending builds. +# +# For builds triggered on a branch push, additional changes +# will wait for prior builds to complete before starting. 
+# +# https://docs.github.com/en/actions/using-jobs/using-concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + run-tests-sqlite: + name: Test with SQLite + if: contains(github.event.pull_request.labels.*.name, 'arch:windows') + + strategy: + matrix: + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + + fail-fast: true + + permissions: + contents: read + + runs-on: windows-latest + timeout-minutes: 45 + + env: + # enable colored output + # https://github.com/pytest-dev/pytest/issues/7443 + PY_COLORS: 1 + + steps: + - uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + cache-dependency-path: "requirements*.txt" + + - name: Install packages + run: | + python -m pip install --upgrade pip + pip install --upgrade --upgrade-strategy eager -e .[dev] + + - name: Run tests + run: | + # Parallelize tests by scope to reduce expensive service fixture duplication + pytest tests -vv --numprocesses auto --dist worksteal --exclude-services --durations=25 diff --git a/.github/workflows/windows-tests.yaml b/.github/workflows/windows-tests.yaml index dc37dd4fd5ea..6b2375c0c1e7 100644 --- a/.github/workflows/windows-tests.yaml +++ b/.github/workflows/windows-tests.yaml @@ -1,16 +1,11 @@ name: Windows tests -env: - # enable colored output - # https://github.com/pytest-dev/pytest/issues/7443 - PY_COLORS: 1 - on: + workflow_dispatch: {} schedule: - cron: '0 16 * * *' # every day at 4 p.m. UTC / 9 a.m. PDT -permissions: - contents: read +permissions: {} jobs: run-tests-sqlite: @@ -18,30 +13,33 @@ jobs: strategy: matrix: - # We only test Windows against 3.9 currently. 
python-version: - "3.9" + - "3.10" + - "3.11" + - "3.12" + + fail-fast: true - fail-fast: false + permissions: + contents: read runs-on: windows-latest timeout-minutes: 45 - steps: - - name: Display current test matrix - run: echo '${{ toJSON(matrix) }}' + env: + # enable colored output + # https://github.com/pytest-dev/pytest/issues/7443 + PY_COLORS: 1 - - uses: actions/checkout@v3 + steps: + - uses: actions/checkout@v4 with: persist-credentials: false fetch-depth: 0 - - name: Set up Docker Buildx - if: ${{ matrix.build-docker-images }} - uses: docker/setup-buildx-action@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: "pip" @@ -50,9 +48,9 @@ jobs: - name: Install packages run: | python -m pip install --upgrade pip - pip install--upgrade --upgrade-strategy eager -e .[dev] + pip install --upgrade --upgrade-strategy eager -e .[dev] - name: Run tests run: | # Parallelize tests by scope to reduce expensive service fixture duplication - pytest tests -vv --numprocesses auto --dist loadscope --exclude-services --durations=25 + pytest tests -vv --numprocesses auto --dist worksteal --exclude-services --durations=25 diff --git a/.gitignore b/.gitignore index d59e631cd674..3cd4730f4f31 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ dist/ sdist/ # Test artifacts +.benchmarks/ .coverage .coverage.*.* .prefect-results @@ -36,12 +37,14 @@ env/ venv/ # Documentation artifacts -schema.json +# gschema.json site/ .cache/ +src/mkdocs-material # UI artifacts src/prefect/server/ui/* +src/prefect/server/ui_build/* **/node_modules # Databases @@ -60,3 +63,16 @@ dask-worker-space/ .idea/ .vscode/ !ui/.vscode/ + +# Prefect files +prefect.yaml + +# Deployment recipes +!src/prefect/deployments/recipes/*/** + +# For development doc server if link +libcairo.2.dylib + +# setuptools-scm generated files +src/integrations/*/**/_version.py +*.log \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000000..14b509f0c4d5 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "compat-tests"] + path = compat-tests + url = https://github.com/PrefectHQ/compat-tests.git diff --git a/.nvmrc b/.nvmrc index 97dcb79e026a..6aab9b43fa34 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v16.5.0 +v18.18.0 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8170a6e46ebc..124a79752928 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,21 +1,26 @@ repos: - - repo: https://github.com/fsouza/autoflake8 - rev: v0.4.0 - hooks: - - id: autoflake8 - language_version: python3 - args: [ - '--in-place', - '--exclude','*/utilities/compat.py,*/utilities/slugify.py,**__init__.py', - ] - - repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - language_version: python3 - - repo: https://github.com/psf/black - rev: 23.1.0 - hooks: - - id: black - language_version: python3 - args: ['--preview'] + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.2.1" + hooks: + - id: ruff + language_version: python3 + args: [--fix, --exit-non-zero-on-fix, --show-fixes] + - id: ruff-format + - repo: https://github.com/codespell-project/codespell + rev: v2.2.6 + hooks: + - id: codespell + exclude: package-lock.json|_vendor/.*|docs/.* + - repo: https://github.com/netromdk/vermin + rev: v1.6.0 + hooks: + - id: vermin + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.9.0 + hooks: + - id: mypy + 
additional_dependencies: + - pydantic>=1.10.0,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0 + - types-cachetools==5.3.0.5 + - types-pyyaml==6.0.12.9 + files: ^(src/prefect/server/models/agents\.py|src/prefect/server/models/flows\.py|src/prefect/concurrency/.*|src/prefect/events/.*|src/prefect/input/.*)$ diff --git a/.ruff.toml b/.ruff.toml new file mode 100644 index 000000000000..acdf7c1b764d --- /dev/null +++ b/.ruff.toml @@ -0,0 +1,26 @@ +src = ["src"] + +# Use Ruff for sorting imports +lint.extend-select = ["I"] + +# Do not enforce line length; black does this for code and we do not care about comments / docs +lint.ignore = ["E501"] + +[lint.per-file-ignores] +# Do not enforce usage and import order rules in init files +"__init__.py" = ["E402", "F401", "I"] + +# Do not fix import in compatibility module +"src/prefect/utilities/compat.py" = ["F401", "I"] + +# Allow wild imports in conftest +"tests/conftest.py" = ["F405", "E402", "F403"] + +# Allow fake items in __all__ for runtime +"src/prefect/runtime/*" = ["F822"] + +# Do not enforce line length limits in migrations +"src/prefect/server/database/migrations/**/*" = ["E501"] + +[lint.isort] +known-third-party = [] diff --git a/Dockerfile b/Dockerfile index 6072ce9612d1..4d58f910030d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # The version of Python in the final image -ARG PYTHON_VERSION=3.8 +ARG PYTHON_VERSION=3.9 # The base image to use for the final image; Prefect and its Python requirements will # be installed in this image. The default is the official Python slim image. # The following images are also available in this file: @@ -7,7 +7,7 @@ ARG PYTHON_VERSION=3.8 # Any image tag can be used, but it must have apt and pip. ARG BASE_IMAGE=python:${PYTHON_VERSION}-slim # The version used to build the Python distributable. -ARG BUILD_PYTHON_VERSION=3.8 +ARG BUILD_PYTHON_VERSION=3.9 # THe version used to build the UI distributable. ARG NODE_VERSION=16.15 # Any extra Python requirements to install @@ -20,8 +20,8 @@ WORKDIR /opt/ui RUN apt-get update && \ apt-get install --no-install-recommends -y \ - # Required for arm64 builds - chromium \ + # Required for arm64 builds + chromium \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Install a newer npm to avoid esbuild errors @@ -29,11 +29,10 @@ RUN npm install -g npm@8 # Install dependencies separately so they cache COPY ./ui/package*.json ./ -RUN npm ci install +RUN npm ci # Build static UI files COPY ./ui . 
-ENV PREFECT_UI_SERVE_BASE="/" RUN npm run build @@ -46,8 +45,8 @@ WORKDIR /opt/prefect RUN apt-get update && \ apt-get install --no-install-recommends -y \ - gpg \ - git=1:2.* \ + gpg \ + git=1:2.* \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Copy the repository in; requires full git history for versions to generate correctly @@ -96,17 +95,17 @@ WORKDIR /opt/prefect # - git: Required for retrieving workflows from git sources RUN apt-get update && \ apt-get install --no-install-recommends -y \ - tini=0.19.* \ - build-essential \ - git=1:2.* \ + tini=0.19.* \ + build-essential \ + git=1:2.* \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Pin the pip version -RUN python -m pip install --no-cache-dir pip==22.3.1 +RUN python -m pip install --no-cache-dir pip==23.3.1 # Install the base requirements separately so they cache -COPY requirements.txt ./ -RUN pip install --upgrade --no-cache-dir -r requirements.txt +COPY requirements-client.txt requirements.txt ./ +RUN pip install --upgrade --upgrade-strategy eager --no-cache-dir -r requirements.txt # Install prefect from the sdist COPY --from=python-builder /opt/prefect/dist ./dist diff --git a/MANIFEST.in b/MANIFEST.in index 3b243569c550..9aced7c33df8 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -11,15 +11,17 @@ include setup.cfg include versioneer.py include requirements.txt include requirements-dev.txt +include requirements-client.txt include src/prefect/_version.py include src/prefect/py.typed include src/prefect/profiles.toml -include src/prefect/projects/recipes/*/*.yaml -include src/prefect/projects/templates/*.yaml +include src/prefect/deployments/recipes/*/*.yaml +include src/prefect/deployments/templates/*.yaml include src/prefect/.prefectignore include src/prefect/logging/logging.yml include src/prefect/cli/templates/*.yaml include src/prefect/server/collection_blocks_data.json +include src/prefect/server/api/collections_data/views/*.json # Migrations include src/prefect/server/database/alembic.ini diff --git a/Makefile b/Makefile new file mode 100644 index 000000000000..aa76b509654e --- /dev/null +++ b/Makefile @@ -0,0 +1,11 @@ +.PHONY: docs + +docs: + @if [ ! -x "./scripts/serve_docs" ]; then \ + echo "Error: The 'serve_docs' script is not executable."; \ + echo "Please make it executable by running:"; \ + echo " chmod +x \"./scripts/serve_docs\""; \ + echo "Then, run 'make docs' again."; \ + exit 1; \ + fi + @./scripts/serve_docs \ No newline at end of file diff --git a/README.md b/README.md index d97c03e80790..c6e0e415b361 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -

+

@@ -10,7 +10,7 @@
- 
+ 
@@ -20,7 +20,26 @@
 # Prefect
 
-Prefect is an orchestrator for data-intensive workflows. It's the simplest way to transform any Python function into a unit of work that can be observed and orchestrated. With Prefect, you can build resilient, dynamic workflows that react to the world around them and recover from unexpected changes. With just a few decorators, Prefect supercharges your code with features like automatic retries, distributed execution, scheduling, caching, and much more. Every activity is tracked and can be monitored with the Prefect server or Prefect Cloud dashboard.
+Prefect is an orchestration and observability platform for building, observing, and triaging workflows.
+It's the simplest way to transform Python code into an interactive workflow application.
+
+Prefect allows you to expose your workflows through an API so teams dependent on you can programmatically access your pipelines, business logic, and more.
+Prefect also allows you to standardize workflow development and deployment across your organization.
+
+With Prefect, you can build resilient, dynamic workflows that react to the world around them and recover from unexpected changes.
+With just a few decorators, Prefect supercharges your code with features like automatic retries, distributed execution, scheduling, caching, and much more.
+
+Every activity is tracked and can be monitored with a self-hosted [Prefect server](https://docs.prefect.io/latest/guides/host/) instance or managed [Prefect Cloud](https://www.prefect.io/cloud-vs-oss?utm_source=oss&utm_medium=oss&utm_campaign=oss_gh_repo&utm_term=none&utm_content=none) dashboard.
+
+## Getting started
+
+Prefect requires Python 3.9 or later. To [install Prefect](https://docs.prefect.io/getting-started/installation/), run the following command:
+
+```bash
+pip install prefect
+```
+
+Then create and run a Python file that uses Prefect `flow` and `task` decorators to orchestrate and observe your workflow - in this case, a simple script that fetches the number of GitHub stars from a repository:
 
 ```python
 from prefect import flow, task
@@ -28,7 +47,7 @@ from typing import List
 import httpx
 
 
-@task(retries=3)
+@task(log_prints=True)
 def get_stars(repo: str):
     url = f"https://api.github.com/repos/{repo}"
     count = httpx.get(url).json()["stargazers_count"]
@@ -42,35 +61,56 @@ def github_stars(repos: List[str]):
 
 
 # run the flow!
-github_stars(["PrefectHQ/Prefect"])
+if __name__ == "__main__":
+    github_stars(["PrefectHQ/Prefect"])
 ```
 
-After running some flows, fire up the Prefect UI to see what happened:
+Fire up the Prefect UI to see what happened:
 
 ```bash
 prefect server start
 ```
 
-![](/docs/img/ui/flow-run-page.png)
+![Prefect UI dashboard](https://github.com/PrefectHQ/prefect/blob/main/docs/images/cloud-overview1.png?raw=true)
+
+To run your workflow on a schedule, turn it into a deployment and schedule it to run every minute by changing the last line of your script to the following:
+
+```python
+    github_stars.serve(name="first-deployment", cron="* * * * *")
+```
+
+You now have a server running locally that is looking for scheduled deployments!
+Additionally, you can run your workflow manually from the UI or CLI - and if you're using Prefect Cloud, you can even run deployments in response to [events](https://docs.prefect.io/latest/concepts/automations/).
 
-From here, you can continue to use Prefect interactively or [deploy your flows](https://docs.prefect.io/concepts/deployments) to remote envirnments, running on a scheduled or event-driven basis.
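+For example, you can trigger a run of the deployment created above from the CLI (a minimal sketch; it assumes the usual `flow-name/deployment-name` slug, with `github_stars` slugified to `github-stars`):
+
+```bash
+prefect deployment run 'github-stars/first-deployment'
+```
+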
+## Prefect Cloud -## Getting Started +Stop worrying about your workflows. +Prefect Cloud allows you to centrally deploy, monitor, and manage the data workflows you support. With managed orchestration, automations, and webhooks, all backed by enterprise-class security, build production-ready code quickly and reliably. -Prefect requires Python 3.7 or later. To [install Prefect](https://docs.prefect.io/getting-started/installation/), run the following command in a shell or terminal session: +Read more about Prefect Cloud [here](https://www.prefect.io/cloud-vs-oss?utm_source=oss&utm_medium=oss&utm_campaign=oss_gh_repo&utm_term=none&utm_content=none) or sign up to [try it for yourself](https://app.prefect.cloud?utm_source=oss&utm_medium=oss&utm_campaign=oss_gh_repo&utm_term=none&utm_content=none). -```bash -pip install prefect -``` +![Prefect Automations](https://github.com/PrefectHQ/prefect/blob/main/docs/images/automations-1.png?raw=true) +![Prefect Automations](https://github.com/PrefectHQ/prefect/blob/main/docs/images/automations-2.png?raw=true) +![Prefect Automations](https://github.com/PrefectHQ/prefect/blob/main/docs/images/automations-4.png?raw=true) + + +## prefect-client + +If your use case is geared towards communicating with Prefect Cloud or a remote Prefect server, check out our +[prefect-client](https://pypi.org/project/prefect-client/). It was designed to be a lighter-weight option for accessing +client-side functionality in the Prefect SDK and is ideal for use in ephemeral execution environments. + +## Next steps -Start by then exploring the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/), then follow one of our [friendly tutorials](https://docs.prefect.io/tutorials/first-steps) to learn by example. +There's lots more you can do to orchestrate and observe your workflows with Prefect! +Start with our [friendly tutorial](https://docs.prefect.io/tutorials) or explore the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/). ## Join the community -Prefect is made possible by the fastest growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more abou Prefect, ask questions, or get help with workflow design. The [Prefect Discourse](https://discourse.prefect.io/) is an community-driven knowledge base to find answers to your Prefect-related questions. All community forums, including code contributions, issue discussions, and slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq). +Prefect is made possible by the fastest growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more about Prefect, ask questions, or get help with workflow design. All community forums, including code contributions, issue discussions, and slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq). ## Contribute See our [documentation on contributing to Prefect](https://docs.prefect.io/contributing/overview/). 
-Thanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!**
+Thanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!**
\ No newline at end of file
diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md
index de5c7038e92a..82f2d9b9a3e1 100644
--- a/RELEASE-NOTES.md
+++ b/RELEASE-NOTES.md
@@ -1,87 +1,4870 @@
 # Prefect Release Notes
+## Release 3.0.0rc1
+
+We're excited to announce the release candidate of Prefect 3.0. It's the most flexible, powerful, and fastest version of Prefect yet. Prefect 3.0 includes several exciting new features. Install it by running `pip install prefect==3.0.0rc1` and check out the docs [here](https://docs-3.prefect.io/3.0rc/getting-started/index).
+
+### Run tasks independently of flows
+
+You can now run and serve tasks outside of flows and inside of other tasks.
+
+```python
+from prefect import task
+
+@task
+def my_background_task(name: str):
+    print(f"Hello, {name}!")
+
+if __name__ == "__main__":
+    my_background_task.delay("ford")
+```
+
+See the following pull requests for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13695
+- https://github.com/PrefectHQ/prefect/pull/13692
+- https://github.com/PrefectHQ/prefect/pull/13681
+- https://github.com/PrefectHQ/prefect/pull/13662
+- https://github.com/PrefectHQ/prefect/pull/13653
+- https://github.com/PrefectHQ/prefect/pull/13643
+- https://github.com/PrefectHQ/prefect/pull/13589
+- https://github.com/PrefectHQ/prefect/pull/13684
+- https://github.com/PrefectHQ/prefect/pull/13676
+- https://github.com/PrefectHQ/prefect/pull/13276
+- https://github.com/PrefectHQ/prefect/pull/13611
+- https://github.com/PrefectHQ/prefect/pull/13547
+- https://github.com/PrefectHQ/prefect/pull/13706
+
+### Transactional semantics
+
+Use rollback and commit hooks to facilitate idempotent Python code.
+
+```python
+from prefect import flow, task
+from prefect.transactions import transaction
+
+@task
+def first_task():
+    print('first')
+
+@first_task.on_rollback
+def roll(txn):
+    print('rolling back')
+
+@task
+def second_task():
+    raise RuntimeError("oopsie")
+
+@flow
+def txn_flow():
+    with transaction():
+        first_task()
+        second_task()
+
+if __name__ == "__main__":
+    txn_flow()
+```
+
+See the following pull requests for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13559
+- https://github.com/PrefectHQ/prefect/pull/13534
+- https://github.com/PrefectHQ/prefect/pull/13535
+- https://github.com/PrefectHQ/prefect/pull/13480
+- https://github.com/PrefectHQ/prefect/pull/13452
+- https://github.com/PrefectHQ/prefect/pull/13450
+- https://github.com/PrefectHQ/prefect/pull/13484
+- https://github.com/PrefectHQ/prefect/pull/13454
+- https://github.com/PrefectHQ/prefect/pull/13477
+- https://github.com/PrefectHQ/prefect/pull/13431
+- https://github.com/PrefectHQ/prefect/pull/13264
+- https://github.com/PrefectHQ/prefect/pull/13337
+- https://github.com/PrefectHQ/prefect/pull/13456
+- https://github.com/PrefectHQ/prefect/pull/13572
+- https://github.com/PrefectHQ/prefect/pull/13582
+- https://github.com/PrefectHQ/prefect/pull/13627
+- https://github.com/PrefectHQ/prefect/pull/13568
+- https://github.com/PrefectHQ/prefect/pull/13438
+- https://github.com/PrefectHQ/prefect/pull/13573
+- https://github.com/PrefectHQ/prefect/pull/13578
+- https://github.com/PrefectHQ/prefect/pull/13414
+
+### Open source Events and Automations
+Trigger actions, such as sending notifications, pausing schedules, starting flow runs, and more in response to Prefect events.
+
+See the following pull requests for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13293
+- https://github.com/PrefectHQ/prefect/pull/13521
+- https://github.com/PrefectHQ/prefect/pull/13335
+
+### More flexible variables and new artifact types
+Variables can now be any JSON-compatible type, including dicts, lists, and integers. Progress and Image artifacts make it easy to add visual annotations to your flow run graph.
+
+See the following pull requests for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13500
+- https://github.com/PrefectHQ/prefect/pull/13520
+- https://github.com/PrefectHQ/prefect/pull/13469
+- https://github.com/PrefectHQ/prefect/pull/13641
+- https://github.com/PrefectHQ/prefect/pull/13605
+
+### Faster and richer CLI
+
+Improved CLI speed and several added commands and conveniences.
+
+See the following pull requests for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13292
+- https://github.com/PrefectHQ/prefect/pull/13596
+- https://github.com/PrefectHQ/prefect/pull/13606
+- https://github.com/PrefectHQ/prefect/pull/13533
+
+### Updated navigation, styling, and interaction design
+The new Runs page displays both flow and task run information, and an improved sidebar and switcher make navigating Prefect simpler than ever.
+ +See the following pull requests for implementation details: +- https://github.com/PrefectHQ/prefect/pull/13395 +- https://github.com/PrefectHQ/prefect/pull/13280 +- https://github.com/PrefectHQ/prefect/pull/13696 +- https://github.com/PrefectHQ/prefect/pull/13668 +- https://github.com/PrefectHQ/prefect/pull/13670 +- https://github.com/PrefectHQ/prefect/pull/13723 + + + +### Enhancements +- Create artifact for unsuccessful dbt task runs — https://github.com/PrefectHQ/prefect/pull/13348 +- Add filter on `task_run.expected_start_time` — https://github.com/PrefectHQ/prefect/pull/13491 +- Add utilities to serialize context to a dictionary and hydrate context from a dictionary — https://github.com/PrefectHQ/prefect/pull/13529 +- Add API endpoints for deployment count and next flow run — https://github.com/PrefectHQ/prefect/pull/13544 +- Allow flow parameter schema generation when dependencies are missing — https://github.com/PrefectHQ/prefect/pull/13315 +- Change the default value for `enforce_parameter_schema` from `False` to `True` — https://github.com/PrefectHQ/prefect/pull/13594 +- Migrate schemas to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13574 +- Removes block auto-instrumentation — https://github.com/PrefectHQ/prefect/pull/13407 +- Migrate all uses of the banned characters validation to a self-validator — https://github.com/PrefectHQ/prefect/pull/13370 +- Ignore and warn on unrecognized settings - https://github.com/PrefectHQ/prefect/pull/13624 + +### Fixes +- Remove unnecessary flow run infrastructure override access checks — https://github.com/PrefectHQ/prefect/pull/13401 +- Enforce False case when flow run id is null — https://github.com/PrefectHQ/prefect/pull/13464 +- Fix workspace variable hydration to allow for JSON data — https://github.com/PrefectHQ/prefect/pull/13548 +- Remove unused settings/experimental work pool flags: `PREFECT_EXPERIMENTAL_ENABLE_WORK_POOLS` and `PREFECT_EXPERIMENTAL_WARN_WORK_POOLS` — https://github.com/PrefectHQ/prefect/pull/13144 +- Pin `pydantic>=2.7` for `Secret` — https://github.com/PrefectHQ/prefect/pull/13613 +- Skip on cancellation hooks if runner can't load flow — https://github.com/PrefectHQ/prefect/pull/13660 +- Refactor lazy imports to avoid accidental eager imports — https://github.com/PrefectHQ/prefect/pull/13296 +- Allow block registration to use client schemas for server model creation — https://github.com/PrefectHQ/prefect/pull/13602 +- Replace our customized `Duration` types with plain `timedelta`s — https://github.com/PrefectHQ/prefect/pull/13603 + +### Experimental +- Add `prefect.yaml` and cli support for new schedule fields — https://github.com/PrefectHQ/prefect/pull/13318 + +### Documentation +- Transition documentation hosting from Netlify to Mintlify — https://github.com/PrefectHQ/prefect/pull/13634 +- Add Python 3.12 to list of Docker images — https://github.com/PrefectHQ/prefect/pull/13321 +- Update `index.md` — https://github.com/PrefectHQ/prefect/pull/13353 +- Improve tutorial section — https://github.com/PrefectHQ/prefect/pull/13297 +- Fix jinja template in automations doc — https://github.com/PrefectHQ/prefect/pull/13422 +- Update development section docs — https://github.com/PrefectHQ/prefect/pull/13247 +- Update Ray integration docs — https://github.com/PrefectHQ/prefect/pull/13467 +- Update Variables docs to include JSON types — https://github.com/PrefectHQ/prefect/pull/13493 +- Update quickstart guide for usability — https://github.com/PrefectHQ/prefect/pull/13562 +- Remove `deployments-block-based` 
concept page and refs for 3.0 — https://github.com/PrefectHQ/prefect/pull/13626 +- Remove `infrastructure` concept page and refs for 3.0 — https://github.com/PrefectHQ/prefect/pull/13629 +- Update docs image paths and remove outdated images — https://github.com/PrefectHQ/prefect/pull/13666 +- Remove references to `prefect.software` from docs — https://github.com/PrefectHQ/prefect/pull/13382 +- Update `host.md` — https://github.com/PrefectHQ/prefect/pull/13351 +- Simplify rate limits page — https://github.com/PrefectHQ/prefect/pull/13689 +- Removing references to deprecated block types and add disclaimer — https://github.com/PrefectHQ/prefect/pull/13651 +- Update guides — https://github.com/PrefectHQ/prefect/pull/13253 +- Remove `storage` concept page and refs - https://github.com/PrefectHQ/prefect/pull/13630 + +### Integrations +- Migrate `prefect-dbt` to pydantic v2 - https://github.com/PrefectHQ/prefect/pull/13718 +- Migrate `prefect-email` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13654 +- Migrate `prefect-slack` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13673 +- Migrate `prefect-shell` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13675 +- Migrate `prefect-gcp` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13650 +- Migrate `prefect-github` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13655 +- Migrate `prefect-gitlab` to pydantic v2 — https://github.com/PrefectHQ/prefect/pull/13656 +- Migrate `prefect-docker` to pydantic v2 - https://github.com/PrefectHQ/prefect/pull/13697 +- Migrate `prefect-sqlalchemy` to pydantic v2 - https://github.com/PrefectHQ/prefect/pull/13700 +- Add `PrefectDistributedClient` to `prefect-dask` — https://github.com/PrefectHQ/prefect/pull/13537 +- Update `RayTaskRunner` for compatibility with new engine — https://github.com/PrefectHQ/prefect/pull/13575 +- Update `DaskTaskRunner` for compatibility with the updated engine — https://github.com/PrefectHQ/prefect/pull/13555 +- prefect-dbt artifact consolidation and markdown fixes — https://github.com/PrefectHQ/prefect/pull/13379 +- prefect-dbt - Cause unsuccessful dbt tasks to fail — https://github.com/PrefectHQ/prefect/pull/13405 +- DBT Tasks extra_command_args Fix — https://github.com/PrefectHQ/prefect/pull/13308 +- Update dbt-core dependency — https://github.com/PrefectHQ/prefect/pull/13394 + +### Breaking Changes +- Remove `prefect deployment build` CLI from `main` — https://github.com/PrefectHQ/prefect/pull/13366 +- Remove `prefect agent` CLI from `main` — https://github.com/PrefectHQ/prefect/pull/13365 +- Remove `prefect deployment apply` CLI from `main` — https://github.com/PrefectHQ/prefect/pull/13367 +- Remove `PrefectAgent` class — https://github.com/PrefectHQ/prefect/pull/13374 +- Remove `prefect.software` — https://github.com/PrefectHQ/prefect/pull/13375 +- Remove `deployments` module — https://github.com/PrefectHQ/prefect/pull/13373 +- Remove `EcsTask` from `main` — https://github.com/PrefectHQ/prefect/pull/13417 +- Remove `AzureContainerInstanceJob` from `main` — https://github.com/PrefectHQ/prefect/pull/13418 +- Remove `VertexAICustomTrainingJob` from `main` — https://github.com/PrefectHQ/prefect/pull/13419 +- Remove `CloudRunJob` from `main` — https://github.com/PrefectHQ/prefect/pull/13420 +- Remove infrastructure blocks from `main` — https://github.com/PrefectHQ/prefect/pull/13424 +- Remove `Infrastructure`, `BlockWorker` from `main` — https://github.com/PrefectHQ/prefect/pull/13430 +- Remove deprecated storage blocks from 
`main` — https://github.com/PrefectHQ/prefect/pull/13410 +- Remove `prefect-agent` as a possible work pool type — https://github.com/PrefectHQ/prefect/pull/13444 +- Remove old engine — https://github.com/PrefectHQ/prefect/pull/13542 +- Remove Python 3.8 support — https://github.com/PrefectHQ/prefect/pull/13331 +- Remove `deprecated` module and its references — https://github.com/PrefectHQ/prefect/pull/13345 +- Remove old task runners and futures modules — https://github.com/PrefectHQ/prefect/pull/13593 +- Remove `is_state` — https://github.com/PrefectHQ/prefect/pull/13569 +- Remove deprecated options from `prefect work-queue` and refs to agents - https://github.com/PrefectHQ/prefect/pull/13638 + +### Contributors +- @bsignoret +* @jaraics made their first contribution in https://github.com/PrefectHQ/prefect/pull/13144 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.19.0...3.0rc1 + +## Release 2.19.3 + +### New method for generating parameter schemas without dependencies + +`prefect deploy` now works even when dependencies are missing from the current environment. This can speed up deployment via CI by removing the need to install dependencies before deploying your flows. + +See the following pull requests for implementation details: +- https://github.com/PrefectHQ/prefect/pull/13620 +- https://github.com/PrefectHQ/prefect/pull/13315 + +### Enhancements +- Provide URL in CLI output upon work pool creation — https://github.com/PrefectHQ/prefect/pull/13597 + +### Fixes +- Ensure graceful cancellation of flow runs corresponding to deleted deployments — https://github.com/PrefectHQ/prefect/pull/13669 + +### Integrations +- Add loading state to concurrency limits table in the Prefect UI — https://github.com/PrefectHQ/prefect-ui-library/pull/2483 +- Remove old schema properties from deployments in the Prefect UI — https://github.com/PrefectHQ/prefect-ui-library/pull/2482 +- Add handling for multi-word dbt CLI commands — https://github.com/PrefectHQ/prefect/pull/13616 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.19.2...2.19.3 + + +## Release 2.19.0 + +### Support for major infrastructure and distributed task integrations +As `prefect-dask` and other integrations have been added to the `prefect` codebase, this release adds these integrations as `extra` requirements of the `prefect` package, making it easier to install support for everything in your Prefect stack. + +```bash +pip install prefect[dask] +``` + +We loved this community contribution so much, we did it for all our first-party integrations. + +```bash +pip install prefect[aws,kubernetes,dask,dbt,sqlalchemy,slack] +``` + +You can see the full list of Prefect's `extra` requirements in [our `setup.py`](https://github.com/PrefectHQ/prefect/blob/main/setup.py#L43). + +See the following pull requests for implementation details: +- https://github.com/PrefectHQ/prefect/pull/13289 +- https://github.com/PrefectHQ/prefect/pull/13310 +- https://github.com/PrefectHQ/prefect/pull/13320 + +### Support for timeout seconds in global concurrency context manager +You may want to fail immediately if a global concurrency slot is unavailable. Rather than block and wait, you can now specify a `timeout_seconds` argument in the global concurrency context manager and catch a `TimeoutError` if a slot is not available within the specified time. 
+
+```python
+from prefect import flow
+from prefect.concurrency.sync import concurrency
+from prefect.states import Cancelled
+
+@flow
+def fail_immediately_flow():
+    try:
+        with concurrency("there-can-be-only-one", occupy=1, timeout_seconds=0.1):
+            # stand-in for whatever resource-intensive work you are guarding
+            do_something_resource_intensive()
+    except TimeoutError:
+        return Cancelled(message="Another flow run is already running")
+```
+
+See the following pull request for implementation details:
+- https://github.com/PrefectHQ/prefect/pull/13262
+
+### Manage global concurrency limits via the CLI
+Global concurrency limits let you control how many operations can run simultaneously; now you can create, read, edit, and delete global concurrency limits via the Prefect CLI!
+
+To create a new concurrency limit, use the `prefect gcl create` command. You must specify a `--limit` argument, and can optionally specify a `--slot-decay-per-second` and `--disable` argument.
+
+```bash
+prefect gcl create my-concurrency-limit --limit 5 --slot-decay-per-second 1.0
+```
+
+You can inspect the details of a concurrency limit using the `prefect gcl inspect` command:
+
+```bash
+prefect gcl inspect my-concurrency-limit
+```
+
+To update a concurrency limit, use the `prefect gcl update` command. You can update the `--limit`, `--slot-decay-per-second`, `--enable`, and `--disable` arguments:
+
+```bash
+prefect gcl update my-concurrency-limit --limit 10
+```
+
+See all available commands and options by running `prefect gcl --help` or read our [docs](/docs/guides/global-concurrency-limits.md#managing-global-concurrency-limits-and-rate-limits).
+
+For implementation details, see the following pull requests:
+- https://github.com/PrefectHQ/prefect/pull/13194
+- https://github.com/PrefectHQ/prefect/pull/13196
+- https://github.com/PrefectHQ/prefect/pull/13214
+- https://github.com/PrefectHQ/prefect/pull/13218
+- https://github.com/PrefectHQ/prefect/pull/13233
+- https://github.com/PrefectHQ/prefect/pull/13238
+
+### Enhancements
+- Remove registry conflict warning — https://github.com/PrefectHQ/prefect/pull/13155
+- Remove top-level Artifacts tab from Prefect UI:
+  - https://github.com/PrefectHQ/prefect/pull/13226
+  - https://github.com/PrefectHQ/prefect/pull/13261
+
+### Fixes
+- Fix work pool base job template generation for `ECSTask` block — https://github.com/PrefectHQ/prefect/pull/13256
+- Fix selecting correct files when using ignore file in `GcsBucket`'s `put_directory` — https://github.com/PrefectHQ/prefect/pull/13290
+- Add `Resuming` flow runs to `BypassCancellingFlowRunsWithNoInfra` orchestration policy — https://github.com/PrefectHQ/prefect/pull/13299
+- Fix `apprise 1.8.0` imports — https://github.com/PrefectHQ/prefect/pull/13311
+- Remove `dataclass` from custom constrained types - https://github.com/PrefectHQ/prefect/pull/13257
+
+### Experimental
+#### Engine
+- Add crash detection for flow runs — https://github.com/PrefectHQ/prefect/pull/13266
+- Consolidate run creation logic on Task — https://github.com/PrefectHQ/prefect/pull/13271
+- Skip timeout context if not needed — https://github.com/PrefectHQ/prefect/pull/13306
+- Add parent task tracking — https://github.com/PrefectHQ/prefect/pull/12915
+- Syncify task engine — https://github.com/PrefectHQ/prefect/pull/13234
+- Syncify flow engine — https://github.com/PrefectHQ/prefect/pull/13246
+- Use Prefect-specific `TestClient` for sync calls — https://github.com/PrefectHQ/prefect/pull/13265
+- Add new sync compatibility setting — https://github.com/PrefectHQ/prefect/pull/13224
+
+#### Deployment Schedule Behavior
+- Add new fields to `DeploymentSchedule` schemas — https://github.com/PrefectHQ/prefect/pull/13204
+- Allow both `active` and `schedule` parameters in `update_deployment_schedule` method — https://github.com/PrefectHQ/prefect/pull/13259
+- Update JSON schema validation for job variables — https://github.com/PrefectHQ/prefect/pull/13182
+
+### Documentation
+- Update block concept page to reflect product updates — https://github.com/PrefectHQ/prefect/pull/13193
+- Update example repo links to `prefecthq` repos — https://github.com/PrefectHQ/prefect/pull/13258
+- Update storage guide — https://github.com/PrefectHQ/prefect/pull/13294
+- Update integration libraries — https://github.com/PrefectHQ/prefect/pull/13277
+- Update `Hosting a Prefect server instance` page — https://github.com/PrefectHQ/prefect/pull/13225
+- Simplify `prefect-aws` and `prefect-dbt` docs index pages — https://github.com/PrefectHQ/prefect/pull/13232
+- Expand discussion of resolution order for cloud-provider service auth — https://github.com/PrefectHQ/prefect/pull/13239
+- Fix repo url typo in storage guide — https://github.com/PrefectHQ/prefect/pull/13304
+
+### Integrations
+- Add pre-built Prefect DBT tasks — https://github.com/PrefectHQ/prefect/pull/12964
+
+### Contributors
+- @Andrew-S-Rosen
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.18.3...2.19.0
+
+## Release 2.18.3
+
+### Experimental
+#### Engine
+- Wire up new engine to deployment runs — https://github.com/PrefectHQ/prefect/pull/12914
+
+### Fixes
+- Fix parameters becoming unresponsive and disappearing in Prefect UI — https://github.com/PrefectHQ/prefect-ui-library/pull/2355
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.18.2...2.18.3
+
+## Release 2.18.2
+
+### Providing a deployment name to `flow.serve` is now optional
+
+When running `flow.serve`, you can now omit the deployment name. If you do not provide a deployment name, the deployment name will default to the name of the flow. This change makes it easier to run flows without needing to specify a deployment name each time:
+
+```python
+from prefect import flow
+
+@flow
+def etl_flow():
+    pass
+
+if __name__ == "__main__":
+    etl_flow.serve()
+```
+results in:
+```bash
+Your flow 'etl-flow' is being served and polling for scheduled runs!
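+
+# Note: since no name was passed to `.serve()`, the deployment name defaulted
+# to the flow name, giving the 'etl-flow/etl-flow' slug shown below.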
+ +To trigger a run for this flow, use the following command: + + $ prefect deployment run 'etl-flow/etl-flow' +``` + +See the following PR for implementation details: +- https://github.com/PrefectHQ/prefect/pull/13069 + +### Enhancements +- Add `PREFECT_SERVER_CSRF_PROTECTION_ENABLED` setting to UI settings — https://github.com/PrefectHQ/prefect/pull/13168 +- Allow case-insensitive state and state type handling when listing flow runs via CLI — https://github.com/PrefectHQ/prefect/pull/13152 + +### Fixes +- Fix deployment parameter defaults on Deployments page in the UI - https://github.com/PrefectHQ/prefect-ui-library/pull/2344 +- Sync value between form and JSON when entering flow parameters on the Deployments page in the UI - https://github.com/PrefectHQ/prefect-ui-library/pull/2342 +- Revert console setup changes to fix interactivity — https://github.com/PrefectHQ/prefect/pull/13145 +- Warn when work queues paused when starting a worker or agent — https://github.com/PrefectHQ/prefect/pull/13159 +- Standardize work pool type as `Process` — https://github.com/PrefectHQ/prefect/pull/13176 +- Raise a clearer error when deleting and inspecting blocks — https://github.com/PrefectHQ/prefect/pull/13136 +- Fix csrf race condition that caused some pages to not render content when refreshing — https://github.com/PrefectHQ/prefect/pull/13172 + +### Experimental +#### Events and Automations +- Add work queue status events — https://github.com/PrefectHQ/prefect/pull/12900 +- Add work pool status events — https://github.com/PrefectHQ/prefect/pull/13158 +- Add support for negative label values in `ResourceSpecification` and filters — https://github.com/PrefectHQ/prefect/pull/13192 +- Add automations SDK methods — https://github.com/PrefectHQ/prefect/pull/12830 +- Add a retention policy for events — https://github.com/PrefectHQ/prefect/pull/13160 +- Allow streaming OSS events via `prefect event stream` — https://github.com/PrefectHQ/prefect/pull/13161 +- Update `prefect automation inspect` to handle automations with same name — https://github.com/PrefectHQ/prefect/pull/12904 +- Update `automation pause` and `automation resume` to handle automations with same name — https://github.com/PrefectHQ/prefect/pull/13131 +- Rename `prefect.work-pool.not_ready` to `prefect.work-pool.not-ready` — https://github.com/PrefectHQ/prefect/pull/13202 +- Correct an issue that would cause the `work-queue.ready` event to overfire — https://github.com/PrefectHQ/prefect/pull/13117 + +#### Engine +- Add dedicated synchronous function handling — https://github.com/PrefectHQ/prefect/pull/12889 +- Add async `task.submit` support with new task engine — https://github.com/PrefectHQ/prefect/pull/13153 +- Fix subflow handling in new engine — https://github.com/PrefectHQ/prefect/pull/12913 +- Handle *args / **kwargs correctly — https://github.com/PrefectHQ/prefect/pull/13142 + +#### Deployment schedule behavior +- Add columns to ORM `DeploymentSchedule` and add migrations — https://github.com/PrefectHQ/prefect/pull/13186 +- Add server default for non-nullable deployment schedule column - https://github.com/PrefectHQ/prefect/pull/13206 + +### Integrations +- Add `keep_container_group` to ACI worker — https://github.com/PrefectHQ/prefect/pull/13143 +- Improve Vertex AI worker performance — https://github.com/PrefectHQ/prefect/pull/13139 +- Migrate `prefect-ray` to core — https://github.com/PrefectHQ/prefect/pull/12869 +- Log full output of databricks job — https://github.com/PrefectHQ/prefect/pull/13151 +- Update Snowflake Connector 
example in UI — https://github.com/PrefectHQ/prefect/pull/12903
+- Fix pydantic v1 prefect-databricks — https://github.com/PrefectHQ/prefect/pull/13166
+- Fix inclusion of commas in tag scrubbing — https://github.com/PrefectHQ/prefect/pull/13190
+- Handle empty `service_account_info` for cached Vertex client — https://github.com/PrefectHQ/prefect/pull/13175
+- Add `dlt-prefect` recipe — https://github.com/PrefectHQ/prefect/pull/13203
+
+### Documentation
+- Add third-party secrets guide — https://github.com/PrefectHQ/prefect/pull/13173
+- Update documentation on nested / autonomous tasks — https://github.com/PrefectHQ/prefect/pull/13154
+- Update Prefect Snowflake docs — https://github.com/PrefectHQ/prefect/pull/13171
+- Update prefect-dbt index page — https://github.com/PrefectHQ/prefect/pull/13187
+- Fix `az acr create` command in ACI worker guide — https://github.com/PrefectHQ/prefect/pull/12909
+
+### Contributors
+- @h2oa made their first contribution in https://github.com/PrefectHQ/prefect/pull/13157
+- @ConstantinoSchillebeeckx
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.18.1...2.18.2
+
+## Release 2.18.1
+
+### Fixes
+- Fix improper context access for nested async task outside of flow — https://github.com/PrefectHQ/prefect/pull/12810
+- Fix using default interval schedule in `prefect deploy` — https://github.com/PrefectHQ/prefect/pull/12833
+- Handle case in `validationUpdate` schema where definitions are falsy — https://github.com/PrefectHQ/prefect/pull/12880
+- Allow `prefect cloud login` to override current workspace — https://github.com/PrefectHQ/prefect/pull/12867
+- Remove extra quotes in `prefect deployment run --watch` — https://github.com/PrefectHQ/prefect/pull/12894
+
+### Experimental
+
+#### Events and Automations
+- Support filtering by automation name:
+  - https://github.com/PrefectHQ/prefect/pull/12850
+  - https://github.com/PrefectHQ/prefect/pull/12884
+  - https://github.com/PrefectHQ/prefect/pull/12887
+- Add support for using the "normal" Trigger classes for `flow.serve` and `.deploy` — https://github.com/PrefectHQ/prefect/pull/12789
+- Add an account-level event subscriber — https://github.com/PrefectHQ/prefect/pull/12808
+- Emit flow run state change events — https://github.com/PrefectHQ/prefect/pull/12825
+- Emit deployment status persistence and events — https://github.com/PrefectHQ/prefect/pull/12853
+- Enable event streaming from `PrefectCloudEventSubscriber` via CLI — https://github.com/PrefectHQ/prefect/pull/12796
+- Update the `prefect automation delete` CLI — https://github.com/PrefectHQ/prefect/pull/12876
+
+#### Engine
+- Add new experimental engine for tasks and flows with improved readability and extensibility — https://github.com/PrefectHQ/prefect/pull/12856
+
+### Documentation
+- Improve installation instructions — https://github.com/PrefectHQ/prefect/pull/12783
+- Improve quickstart — https://github.com/PrefectHQ/prefect/pull/12798
+- Migrate `prefect-azure` docs to Integrations section of the Prefect docs — https://github.com/PrefectHQ/prefect/pull/12794
+- Update storage guide credentials blocks — https://github.com/PrefectHQ/prefect/pull/12819
+- Remove `server` import recommendations — https://github.com/PrefectHQ/prefect/pull/12823
+- Remove link to removed API page — https://github.com/PrefectHQ/prefect/pull/12824
+- Add Azure Container Instances worker guide — https://github.com/PrefectHQ/prefect/pull/12846
+- Improve wording on integrations index page — https://github.com/PrefectHQ/prefect/pull/12852
+
+#### Prefect UI Library
+- Add `FormattedDate` component to display accessible, long-form timestamps consistently
+- Update modal buttons and add auto-close to the parameters and job variable modals — https://github.com/PrefectHQ/prefect-ui-library/pull/2320
+- Add flow run list information density — https://github.com/PrefectHQ/prefect-ui-library/pull/2321
+- Fix "Run a deployment" action not populating the default parameters from the deployment — https://github.com/PrefectHQ/prefect-ui-library/pull/2322
+- Fix schema form properties with no default value from defaulting to `null` (`None`) — https://github.com/PrefectHQ/prefect-ui-library/pull/2323
+- Update date-fns and date-fns-tz — https://github.com/PrefectHQ/prefect-ui-library/pull/2319
+- Use correct icon colors for non-destructive actions in the UI — https://github.com/PrefectHQ/prefect-ui-library/pull/2328
+
+### Integrations
+#### Prefect GCP
+- Remove API ref to nonexistent Google Cloud Run V2 page — https://github.com/PrefectHQ/prefect-gcp/pull/260
+- Fix VPC access for Cloud v2 worker — https://github.com/PrefectHQ/prefect-gcp/pull/266
+- Handle case where `vpc` isn't in job template — https://github.com/PrefectHQ/prefect-gcp/pull/267
+
+### New Contributors
+* @keizobabybear made their first contribution in https://github.com/PrefectHQ/prefect/pull/12852
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.18.0...2.18.1
+
+## Release 2.18.0
+
+### Breaking Changes
+- Deployment configuration update: The `prefect deploy` command now only supports the `prefect.yaml` file. The `deployment.yaml` file is no longer supported
+following its deprecation last June. Users should update their deployment configurations to use `prefect.yaml` instead. Running `prefect deploy` on a version prior to 2.18.0 will migrate your `deployment.yaml` file to a `prefect.yaml` file. - https://github.com/PrefectHQ/prefect/pull/12731
+- `prefect deploy` options update: The `-f/--flow` option has been removed from `prefect deploy` following its deprecation last June. Please deploy
+using the flow entrypoint instead. - https://github.com/PrefectHQ/prefect/pull/12732
+- `prefect project` removal: The `projects` command group has been removed following its deprecation last June. For instance, instead of using `prefect project init`, use `prefect init`. — https://github.com/PrefectHQ/prefect/pull/12737
+- `--ci` option removal: The `--ci` option in `prefect deploy` has been removed to unify the deployment experience across different environments. This removal follows its scheduled deprecation. Please use the `--no-prompt` option instead, e.g. `prefect --no-prompt deploy`.
— https://github.com/PrefectHQ/prefect/pull/12740 + +### Enhancements +- Improve account selection in `prefect cloud login` and `workspace set` — https://github.com/PrefectHQ/prefect/pull/12717 + +### Fixes +- Raise clearer flow validation error — https://github.com/PrefectHQ/prefect/pull/12715 +- Exclude job_variables when exclude=None — https://github.com/PrefectHQ/prefect/pull/12712 +- Remove experimental flags on infrastructure overrides — https://github.com/PrefectHQ/prefect/pull/12742 + +### Experimental + +#### Pydantic V2 Compatibility +- Introduce self-validating types — https://github.com/PrefectHQ/prefect/pull/12707 +- Refactor `field_validator` and `model_validator` to map Pydantic kwargs between versions — https://github.com/PrefectHQ/prefect/pull/12676 +- Fix type-hinting for self-validating fields — https://github.com/PrefectHQ/prefect/pull/12710 +- Fix types NonNegativeDuration / PositiveDuration — https://github.com/PrefectHQ/prefect/pull/12711 + + +#### Events and Automations +- Implement the `run-deployment` automation action — https://github.com/PrefectHQ/prefect/pull/12677 +- Implement the `send-notification` action — https://github.com/PrefectHQ/prefect/pull/12693 +- Make `TriggeredAction.firing` required — https://github.com/PrefectHQ/prefect/pull/12697 +- Add an Actions service — https://github.com/PrefectHQ/prefect/pull/12699 +- Implement the `call-webhook` action and adds all Action client-side schemata — https://github.com/PrefectHQ/prefect/pull/12728 +- Implement `change-flow-run-state`, `cancel-flow-run`, and `suspend-flow-run` — https://github.com/PrefectHQ/prefect/pull/12730 +- Add functions for querying and counting events — https://github.com/PrefectHQ/prefect/pull/12696 +- Implement the `pause-deployment` and `resume-deployment` actions — https://github.com/PrefectHQ/prefect/pull/12733 +- Add `/events/filter` and `/events/count-by` route trees — https://github.com/PrefectHQ/prefect/pull/12736 +- Allow for creating automations via deployments when experimental events is on — https://github.com/PrefectHQ/prefect/pull/12701 +- Add ability to stream out events via websocket — https://github.com/PrefectHQ/prefect/pull/12744 +- Implement the `pause-automation` and `resume-automation` actions — https://github.com/PrefectHQ/prefect/pull/12738 +- Add automations CLI — https://github.com/PrefectHQ/prefect/pull/12754 +- Rename `prefect-cloud.*` events and labels to `prefect.*` — https://github.com/PrefectHQ/prefect/pull/12755 +- Add ability to emit events to an ephemeral Prefect server — https://github.com/PrefectHQ/prefect/pull/12762 +- Disable `events` and `automations` API routes when experimental events setting is not enabled — https://github.com/PrefectHQ/prefect/pull/12777 +- Add compatibility tests for client and server triggers and actions — https://github.com/PrefectHQ/prefect/pull/12778 +- Disable the automations integration flows for Prefect Cloud — https://github.com/PrefectHQ/prefect/pull/12784 +- Add pause and resume the work pool and work queue actions — https://github.com/PrefectHQ/prefect/pull/12735 +- Add helper functions for creating an events client or subscriber — https://github.com/PrefectHQ/prefect/pull/12759 +- Add default posture to `EventTrigger` schema — https://github.com/PrefectHQ/prefect/pull/12764 + - Fix writing events for SQLite + SQLAlchemy<2 — https://github.com/PrefectHQ/prefect/pull/12679 + +### Documentation +- Update `prefect.yaml` example in work pools concepts page — https://github.com/PrefectHQ/prefect/pull/12695 +- Fix typo 
in Quickstart — https://github.com/PrefectHQ/prefect/pull/12729 +- Simplify quickstart — https://github.com/PrefectHQ/prefect/pull/12725 +- Add `.serve`, `.deploy`, and composite trigger examples to deployment triggers docs — https://github.com/PrefectHQ/prefect/pull/12743 +- Update automations images — https://github.com/PrefectHQ/prefect/pull/12752 +- Simplify tutorial — https://github.com/PrefectHQ/prefect/pull/12765 +- Remove disclaimer for Python 3.12 experimental support — https://github.com/PrefectHQ/prefect/pull/12771 +- Clarify deployment trigger examples — https://github.com/PrefectHQ/prefect/pull/12782 +- Remove Prefect-managed integration libraries to be archived from the integrations catalog — https://github.com/PrefectHQ/prefect/pull/12781 +- Fix broken link to push work pool guide — https://github.com/PrefectHQ/prefect/pull/12748 +- Fix minor restructure to improve legibility of work pools tutorial — https://github.com/PrefectHQ/prefect/pull/12747 +- Fix `typing` import and typos in tasks tutorial — https://github.com/PrefectHQ/prefect/pull/12746 +- Simplify installation — https://github.com/PrefectHQ/prefect/pull/12772 +- Fix import syntax in `variables.Variable` example — https://github.com/PrefectHQ/prefect/pull/12727 +- Fix typo in How-to Guide document — https://github.com/PrefectHQ/prefect/pull/12761 + + +## New Contributors +* @hboehmer-IW made their first contribution in https://github.com/PrefectHQ/prefect/pull/12721 +* @avriiil made their first contribution in https://github.com/PrefectHQ/prefect/pull/12748 +* @takashimakazuki made their first contribution in https://github.com/PrefectHQ/prefect/pull/12761 + +### Integrations +- Add support for a capacity provider — https://github.com/PrefectHQ/prefect-aws/pull/407 +- Improve error handling for task creation — https://github.com/PrefectHQ/prefect-aws/pull/406 + + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.17.1...2.18.0 + +## Release 2.17.1 + +### Fixes +- Fix events storage import — https://github.com/PrefectHQ/prefect/pull/12681 +- Remove `opentelemetry` import — https://github.com/PrefectHQ/prefect/pull/12684 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.17.0...2.17.1 + +## Release 2.17.0 + +### Manage Prefect variables via the Python SDK + +Prefect variables are useful for storing and reusing data and configuration between and across workflows; and previously you could only create and update variables via the Prefect UI. With this release, you can now get and set Prefect variables directly in your Python code with the new `Variable.set` and `Variable.get` methods! 
+ +For an example of reading and writing variable values in Python see the following example: + +```python +from prefect.variables import Variable + +# set a variable +variable = Variable.set(name="the_answer", value="42") + +# get a variable +answer = Variable.get('the_answer') +print(answer.value) +# 42 + +# get a variable with a default value +answer = Variable.get('not_the_answer', default='42') +print(answer.value) +# 42 + +# update a variable +answer = Variable.set(name="the_answer", value="43", overwrite=True) +print(answer.value) +#43 +``` + +Refer to the [docs](https://docs.prefect.io/latest/guides/variables/#accessing-variables) for more information and see the PR for implementation details: https://github.com/PrefectHQ/prefect/pull/12596 + +### Enhancements +- Allow flows inside tasks + — https://github.com/PrefectHQ/prefect/pull/12559 + — https://github.com/PrefectHQ/prefect/pull/12607 +- Add `User-Agent` header containing the running Prefect version — https://github.com/PrefectHQ/prefect/pull/12601 +- Adds deployment version to the flow run object — https://github.com/PrefectHQ/prefect/pull/12591 + +### Fixes +- Transition flow runs without active infrastructure directly to cancelled — https://github.com/PrefectHQ/prefect/pull/12582 +- Remove duplicate CLI output when reauthorizing with `prefect cloud login` — https://github.com/PrefectHQ/prefect/pull/12664 +- Add `blob_storage` extra as requirement for Azure `prefect.yaml` recipes — https://github.com/PrefectHQ/prefect/pull/12333 +- Exclude Typer 0.12.2 from solver — https://github.com/PrefectHQ/prefect/pull/12618 +- Correct `schedules`/`is_schedule_active` deprecation windows — https://github.com/PrefectHQ/prefect/pull/12616 + +### Experimental / In-Flight Features + +#### Pydantic V2 Compatibility +- Add `pydantic` V2 compatible `field_validator` — https://github.com/PrefectHQ/prefect/pull/12576 +- Add `pydantic` V2 `model_validator` — https://github.com/PrefectHQ/prefect/pull/12635 +- Expose `field_validator` in `pydantic` compatibility layer — https://github.com/PrefectHQ/prefect/pull/12608 +- Add `ConfigDict` to `pydantic` compatibility layer — https://github.com/PrefectHQ/prefect/pull/12629 +- Add `model_fields_set` to `pydantic` compatibility layer — https://github.com/PrefectHQ/prefect/pull/12654 +- Map `copy_on_model_validation` to `revalidate_instances` in `pydantic` compatibility layer — https://github.com/PrefectHQ/prefect/pull/12644 + +#### Events and Automations +- Enable `EventsWorker` to emit events to Prefect servers — https://github.com/PrefectHQ/prefect/pull/12637 +- Add ORM models and database migrations for events storage — https://github.com/PrefectHQ/prefect/pull/12651 +- Add automations API — https://github.com/PrefectHQ/prefect/pull/12620 +- Add reactive and composite triggers — https://github.com/PrefectHQ/prefect/pull/12650 +- Add proactive triggers — https://github.com/PrefectHQ/prefect/pull/12660 +- Add `EventPersister` service to store received events - https://github.com/PrefectHQ/prefect/pull/12662 + +### Deprecations +- Remove expired deprecations from `prefect/__init__.py` — https://github.com/PrefectHQ/prefect/pull/12613 + +### Documentation +- Update references to deployment schedules — https://github.com/PrefectHQ/prefect/pull/12595 +- Add missing navigation items for `prefect shell` CLI command — https://github.com/PrefectHQ/prefect/pull/12598 +- Update formatting for `prefect shell` CLI command — https://github.com/PrefectHQ/prefect/pull/12606 +- Add comment to blocks concept page when 
using `SecretStr` with `pydantic` V2 — https://github.com/PrefectHQ/prefect/pull/12632 +- Fix name format in `run_deployment` docstring — https://github.com/PrefectHQ/prefect/pull/12628 +- Add documentation for flow run job variables — https://github.com/PrefectHQ/prefect/pull/12490 +- Add example of retrieving an artifact in Python code — https://github.com/PrefectHQ/prefect/pull/12666 + +### Contributors +- @hainenber + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.9...2.17.0 + +## Release 2.16.9 + +### `prefect deploy` with `-jv/--job-variable` option + +In a prior release, we added a `-jv/--job-variable` option for providing job variables when running a deployment using `prefect deployment run`. We want to be consistent in our CLI by allowing you to use this option while creating deployments during `prefect deploy`! Thus, we have added a `-jv/--job-variable` option to `prefect deploy` to replace the `-v/--variables` option, which we have now deprecated. + +See the following pull request for implementation details: +- https://github.com/PrefectHQ/prefect/pull/12410 + +### Enhancements +- Remove nested task constraint that prevented tasks called from other tasks — https://github.com/PrefectHQ/prefect/pull/12548 +- Stop creating artifacts for unpersisted results - https://github.com/PrefectHQ/prefect/pull/12454 +- Allow for deletion of work pool workers via API — https://github.com/PrefectHQ/prefect/pull/12330 +- Raise more informative error on `prefect worker start -t bad-type` - https://github.com/PrefectHQ/prefect/pull/12586 +- Add tooltip and increase width to support better displaying long Prefect variable names in the UI https://github.com/PrefectHQ/prefect-ui-library/pull/2275 + +### Fixes +- Raise lower bound on `typer` dependency — https://github.com/PrefectHQ/prefect/pull/12512 +- Skip flow run cancellation if no associated deployment — https://github.com/PrefectHQ/prefect/pull/12001 +- Handle referenced blocks in base templates during `job_variable` validation — https://github.com/PrefectHQ/prefect/pull/12329 +- Select correct `AsyncWaiter` for successively awaited flow and task calls — https://github.com/PrefectHQ/prefect/pull/12510 +- Handle flow run creation for runner-managed deployments — https://github.com/PrefectHQ/prefect/pull/12319 +- Expose `ignore_warnings` in `Flow.deploy` — https://github.com/PrefectHQ/prefect/pull/12569 +- Allow `prefect cloud login` re-authentication in non-interactive mode — https://github.com/PrefectHQ/prefect/pull/12575 +- Update ECS provisioner IAM policy to include `ecs:TagResource` permission — https://github.com/PrefectHQ/prefect/pull/12551 +- Correctly populate custom default parameters in the flow submission form in the UI - https://github.com/PrefectHQ/prefect-ui-library/pull/2280 + +### Experimental / In-Flight Features +#### Flow Run Infrastructure Overrides +- Add support for adding job variables to trigger definitions via CLI - https://github.com/PrefectHQ/prefect/pull/12276 + +#### Pydantic V2 Compatibility +- Add dynamic importing of Pydantic modules + - https://github.com/PrefectHQ/prefect/pull/12498 + - https://github.com/PrefectHQ/prefect/pull/12503 +- Refactor Pydantic V2 compatibility layer into submodules — https://github.com/PrefectHQ/prefect/pull/12522 +- Enable support for `mode="json"` in `model_dump` function by default — https://github.com/PrefectHQ/prefect/pull/12540 + +#### Events and Automations +- Add message publisher and consumer abstractions, with in-memory implementation — 
https://github.com/PrefectHQ/prefect/pull/12485 +- Add events HTTP and websocket endpoints — https://github.com/PrefectHQ/prefect/pull/12499 +- Add a diagnostic service which consumes events and prints a summary of them — https://github.com/PrefectHQ/prefect/pull/12501 +- Add internal events client for publishing events from other server-side areas — https://github.com/PrefectHQ/prefect/pull/12520 +- Add an internal orchestration API client for use in events — https://github.com/PrefectHQ/prefect/pull/12534 +- Add server-side automations schema models — https://github.com/PrefectHQ/prefect/pull/12549 +- Add ORM classes and model modules for automations and its state tables — https://github.com/PrefectHQ/prefect/pull/12581 + +### Integrations - Prefect AWS +- Fix `S3Bucket.copy_object` target path resolution — https://github.com/PrefectHQ/prefect-aws/pull/385 +- Add Python 3.12 support and remove 3.7 support — https://github.com/PrefectHQ/prefect-aws/pull/405 +- Change logging prefix to avoid unnecessary task definition registrations — https://github.com/PrefectHQ/prefect-aws/pull/400 + +### Deprecations +- Deprecate `KubernetesCusterConfig` block — https://github.com/PrefectHQ/prefect/pull/12571 +- Remove use of PartialModel — + +### Documentation +- Add `prefect shell` commands to guides index — https://github.com/PrefectHQ/prefect/pull/12494 +- Update Prefect Cloud plan information — https://github.com/PrefectHQ/prefect/pull/12505 +- Add timeout information to flows concept page — https://github.com/PrefectHQ/prefect/pull/12550 +- Remove outdated doc warning on calling tasks within tasks — https://github.com/PrefectHQ/prefect/pull/12580 +- Remove broken link from FAQ page - https://github.com/PrefectHQ/prefect/pull/12590 +- Fix typo in FAQ page — https://github.com/PrefectHQ/prefect/pull/12584 + +### Contributors +* @hainenber +* @jwijffels made their first contribution in https://github.com/PrefectHQ/prefect/pull/12575 +* @ShaoyiZhang made their first contribution in https://github.com/PrefectHQ/prefect/pull/12584 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.8...2.16.9 + +## Release 2.16.8 + +### Fixes +- Disable CSRF protection by default - https://github.com/PrefectHQ/prefect/pull/12479 +- Fix issue causing UI not to be built when creating docker images - https://github.com/PrefectHQ/prefect/pull/12481 + +## Release 2.16.7 + +### Introducing `prefect shell` 💻 for observing CLI commands +You can now observe CLI commands as a Prefect flow. 
For example, take the command: +```console +» curl http://wttr.in/Chicago\?format\=3 +Chicago: ⛅️ +50°F +``` + +To run this as a Prefect flow, you can use the following CLI command: +```python +» prefect shell watch "curl http://wttr.in/Chicago?format=3" +17:32:39.562 | INFO | prefect.engine - Created flow run 'powerful-mushroom' for flow 'Shell Command' +17:32:40.171 | INFO | Flow run 'powerful-mushroom' - Chicago: ⛅️ +50°F +17:32:40.315 | INFO | Flow run 'powerful-mushroom' - Finished in state Completed() +``` + +See these [docs](https://docs.prefect.io/latest/guides/cli-shell) to learn how to: +- run a shell command as a Prefect flow on-demand with `watch` +- schedule a shell command as a recurring Prefect flow using `serve` + +See the PR for implementation details: https://github.com/PrefectHQ/prefect/pull/11998 + +### Enhancements +- Integrate composite triggers with the `DeploymentTrigger` YAML representation — https://github.com/PrefectHQ/prefect/pull/12413 +- Add JSON Artifacts — https://github.com/PrefectHQ/prefect/pull/12295 +- Add auto-provisioning option for Cloud Run V2 push work pools — https://github.com/PrefectHQ/prefect/pull/12422 +- Increase late runs after seconds setting default — https://github.com/PrefectHQ/prefect/pull/12457 + +### Fixes +- Properly display falsy `concurrency_limit` value in CLI — https://github.com/PrefectHQ/prefect/pull/12358 +- Correct wrong date in `prefect deploy` deprecation warning for `schedule` — https://github.com/PrefectHQ/prefect/pull/12399 +- Prompt user confirmation for pausing work queue in default work pool — https://github.com/PrefectHQ/prefect/pull/12334 +- Correct type for `slot_decay_per_second` in client SDK — https://github.com/PrefectHQ/prefect/pull/12401 +- Sync SDK upgrades with UI upgrades — https://github.com/PrefectHQ/prefect/pull/12429 +- Pin uvicorn to < 0.29 — https://github.com/PrefectHQ/prefect/pull/12463 + +### Experimental +- More robust error handling in `TaskServer` — https://github.com/PrefectHQ/prefect/pull/12382 +- Add `model_validate_json` to Pydantic compat layer — https://github.com/PrefectHQ/prefect/pull/12412 +- Add `model_dump_json` to Pydantic compat layer — https://github.com/PrefectHQ/prefect/pull/12406 +- Add hybrid `BaseModel` and public `pydantic` module — https://github.com/PrefectHQ/prefect/pull/12424 +- Add Pydantic `TypeAdapter` backport — https://github.com/PrefectHQ/prefect/pull/12445 +- Add `model_copy` to Pydantic compat layer — https://github.com/PrefectHQ/prefect/pull/12418 + +### Documentation +- Add `prefect shell` CLI documentation — https://github.com/PrefectHQ/prefect/pull/12474 +- Add links to serverless and push serverless work pool guides for dependency management — https://github.com/PrefectHQ/prefect/pull/12392 +- Add example of transitioning all running flows to `CANCELLED` via Prefect client — https://github.com/PrefectHQ/prefect/pull/12390 +- Temporarily remove social cards — https://github.com/PrefectHQ/prefect/pull/12465 + +### Contributors +- @hainenber + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.6...2.16.7 + +## Release 2.16.6 + +### Fix new behavior in `typer 0.10.0` that broke the `prefect` CLI +See the following pull request for implementation details: https://github.com/PrefectHQ/prefect/pull/12398 + +### Enhancements +- Improve nested schema hydration for templating — https://github.com/PrefectHQ/prefect/pull/12384 +- Improve interactive workspace selection in the CLI — https://github.com/PrefectHQ/prefect/pull/12387 +- Use Pydantic V2 for 
schema generation where possible — https://github.com/PrefectHQ/prefect/pull/12210 +- Enable CSRF protection by default — https://github.com/PrefectHQ/prefect/pull/12377 + +### Fixes +- Handle new `typer.Option` behavior — https://github.com/PrefectHQ/prefect/pull/12398 + +### Experimental +- Add experimental `model_validate` function for Pydantic V2 compatibility — https://github.com/PrefectHQ/prefect/pull/12370 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.5...2.16.6 + +## Release 2.16.5 + +### Multi-select deletion of flow runs +It is now easier to bulk select and delete flow runs through the UI. Listings of filterable and selectable flow runs (e.g. on the flow runs, flow, and deployment pages) now include a top-level checkbox for (de)selecting all currently filtered flow runs for bulk deletion. + +![image](https://github.com/PrefectHQ/prefect/assets/42048900/2431caf4-c1be-4afd-bcff-3c24fa94dc64) + +See the following pull request for implementation details: +- https://github.com/PrefectHQ/prefect/pull/12356 +- https://github.com/PrefectHQ/prefect-ui-library/pull/2227 +- https://github.com/PrefectHQ/prefect/pull/12285 + +### Visualize state changes and artifacts in the UI +Additionally, the flow run graph UI enhancements for visualizing state changes and artifacts added in 2.16.4 are now enabled by default. See [the release notes in 2.16.14 for more details](https://github.com/PrefectHQ/prefect/blob/main/RELEASE-NOTES.md#release-2164)! + +### Enhancements +- Keep artifacts file in prefect-client — https://github.com/PrefectHQ/prefect/pull/12316 +- remove feature flagging around enhanced-deployment-experiment — https://github.com/PrefectHQ/prefect/pull/12360 +- Feature : #11773 UI: Add checkboxes for runs for an individual flow to allow multi-selection/-deletion — https://github.com/PrefectHQ/prefect/pull/12285 +- Add a capability to verify ssl certificates in Prefect CLI — https://github.com/PrefectHQ/prefect/pull/11771 +- Add `prefect task-run` command group to CLI — https://github.com/PrefectHQ/prefect/pull/12307 +- Correct emit background task state change events — https://github.com/PrefectHQ/prefect/pull/12352 +- Update `CsrfTokenApi` to retry failed requests due to invalid tokens — https://github.com/PrefectHQ/prefect/pull/12373 + +### Fixes +- Refactor logic to set `task_key` for background tasks — https://github.com/PrefectHQ/prefect/pull/12337 +- Correct a memory leak with the outbound task run websockets — https://github.com/PrefectHQ/prefect/pull/12346 +- Correctly type hint in flow run state change hooks — https://github.com/PrefectHQ/prefect/pull/12231 + +### Experimental +- Create `CsrfToken` model and utilities — https://github.com/PrefectHQ/prefect/pull/12289 +- Create `csrf_token` endpoint to generate tokens for clients — https://github.com/PrefectHQ/prefect/pull/12297 +- Integrate `CsrfMiddleware` into API server — https://github.com/PrefectHQ/prefect/pull/12303 +- Add CSRF support to client — https://github.com/PrefectHQ/prefect/pull/12314 +- Return 422 when CSRF is disabled and delete expired tokens — https://github.com/PrefectHQ/prefect/pull/12342 +- Add `model_dump` definition for Pydantic v2 compatibility layer — https://github.com/PrefectHQ/prefect/pull/12345 +- Add experimental `model_json_schema` definition for Pydantic V2 compatibility - https://github.com/PrefectHQ/prefect/pull/12362 +- Implement CSRF support in the UI — https://github.com/PrefectHQ/prefect/pull/12354 + +### Documentation +- Add upstream dependencies guide to docs — 
https://github.com/PrefectHQ/prefect/pull/12351 +- Update documentation on event and metric automation triggers — https://github.com/PrefectHQ/prefect/pull/12366 +- Add documentation on compound and sequence automation triggers — https://github.com/PrefectHQ/prefect/pull/12374 +- Add CSRF settings to common settings section in docs — https://github.com/PrefectHQ/prefect/pull/12376 + +### Uncategorized +- Pin `BuildKit` to 0.12.5 to fix issue with test image build — https://github.com/PrefectHQ/prefect/pull/12343 +- Backporting the Prefect Cloud composite trigger schemas — https://github.com/PrefectHQ/prefect/pull/12378 + +### Contributors +* @hainenber +* @JiginJayaprakash made their first contribution in https://github.com/PrefectHQ/prefect/pull/12307 +* @baisystems made their first contribution in https://github.com/PrefectHQ/prefect/pull/11771 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.4...2.16.5 + +## Release 2.16.4 + +### Flow Run Graph updates + +The Flow Run Graph has been updated to display additional layers of information! Interactive and real-time state changes and artifacts are now visible in context on the graph. + +The Prefect flow run graph + + +These new layers are available for opt-in usage via the `PREFECT_EXPERIMENTAL_ENABLE_ARTIFACTS_ON_FLOW_RUN_GRAPH` and `PREFECT_EXPERIMENTAL_ENABLE_STATES_ON_FLOW_RUN_GRAPH` settings. + +### Agents + +A year ago, we released workers as a replacement for agents. Workers significantly enhance the experience of deploying flows and simplify the specification of each flow's infrastructure and runtime environment. + +With this release we are adding a six month (September 14) deprecation warning to agents and related concepts. Please note that: + +- Deprecation will not impact or break any work running with agents and agent-related concepts - although we encourage users to upgrade to workers because they provide a better deployment experience, you can continue to use existing agents and related concepts after deprecation +- After September 14, Prefect Cloud users will not be able to create new agent work pools or infrastructure blocks +- After September 14, new minor versions of the Prefect Python package will not include agents + +Like agents, workers support creating deployments through the Prefect CLI and through Python, but require different syntax. + +For more information, please refer to the [Upgrade from Agents to Workers](https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/) guide. + +### Enhancements +* Give better client-side feedback on websocket authT/authZ issues - https://github.com/PrefectHQ/prefect/pull/12221 +* Allow table artifact cells to render markdown content - [`#2190`](https://github.com/PrefectHQ/prefect-ui-library/pull/2190) +* Add an 'AzureBlobStorageContainer' block - [`#139`](https://github.com/PrefectHQ/prefect-azure/pull/139) +* API for task run counts by state - https://github.com/PrefectHQ/prefect/pull/12244 +* Improved UI handling of custom flow run states. 
Badges for a state with a custom name will now more closely resemble their underlying state - https://github.com/PrefectHQ/prefect-ui-library/pull/2210 and https://github.com/PrefectHQ/prefect-ui-library/pull/2208 +### Fixes +* Fix support for legacy schedule in `build_from_flow` - https://github.com/PrefectHQ/prefect/pull/12257 +* Remove extra field from create endpoints - https://github.com/PrefectHQ/prefect/pull/12240 +* Prevent invalid link problem for `prefect deployment run` - https://github.com/PrefectHQ/prefect/pull/12267 +* Hydrate run input when resuming a flow run - https://github.com/PrefectHQ/prefect/pull/12259 +* Fix state select in notifications [`#2216`](https://github.com/PrefectHQ/prefect-ui-library/pull/2216) +* Adds porting of network configuration to generated base job templates [`#392`](https://github.com/PrefectHQ/prefect-aws/pull/392) +### Experimental +* Expose `serve` method on tasks - https://github.com/PrefectHQ/prefect/pull/12239 +* Avoid relative module path problem with subscription keys - https://github.com/PrefectHQ/prefect/pull/12227 +### Deprecations +* Deprecate `PrefectAgent` and `prefect agent` command group - https://github.com/PrefectHQ/prefect/pull/12273 +* Deprecate Infrastructrue blocks - https://github.com/PrefectHQ/prefect/pull/12282 +* Deprecate `Deployment` class and deployment `build` and `apply` commands - https://github.com/PrefectHQ/prefect/pull/12283 +* Deprecate the `packaging` module - https://github.com/PrefectHQ/prefect/pull/12291 +* Deprecate storage blocks with implementations in Prefect integration packages - https://github.com/PrefectHQ/prefect/pull/12299 +* Do not create `default-agent-pool` in empty state server - https://github.com/PrefectHQ/prefect/pull/12214 +### Typing +* Refactor: Inject Client - https://github.com/PrefectHQ/prefect/pull/12258 +* Fix: Variables methods type-hinting - https://github.com/PrefectHQ/prefect/pull/12278 +### Documentation +* Fix formatting in concept -> work-pools.md - https://github.com/PrefectHQ/prefect/pull/12230 +* Add `get_run_logger` to imports in interactive workflow examples - https://github.com/PrefectHQ/prefect/pull/12284 +* Add `on_running` state change hook to State -> Concepts docs - https://github.com/PrefectHQ/prefect/pull/12293 +* Fix some argument names in deploy guide - https://github.com/PrefectHQ/prefect/pull/12301 + + +## New Contributors + +- @hainenber made their first contribution in https://github.com/PrefectHQ/prefect/pull/12232 + +**Full Changelog**: https://github.com/PrefectHQ/prefect/compare/2.16.3...2.16.4 + +## Release 2.16.3 + +### Enhanced deployment parameters in the Prefect UI + +We’ve refined the deployment parameters UI to significantly enhance default value handling and list management. This release introduces support for tuple-type parameters and a revamped list UI, freeing users from the constraints of JSON for list inputs. You now have the flexibility to utilize JSON or Prefect variables for any parameter value, enhancing the versatility of deployment configurations. Moreover, we’ve synchronized validation across the UI and deployment schemas, ensuring that user inputs are consistently checked against the defined parameter requirements, which simplifies the deployment process and minimizes configuration errors. + +These improvements are aimed at providing a more efficient and user-friendly interface for managing deployment parameters. Check it out for yourself! 
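+
+For illustration, a flow with parameters like the hypothetical sketch below (the flow and parameter names are purely illustrative, not from this release) now gets native list and tuple inputs in the UI instead of raw JSON fields:
+
+```python
+from typing import List, Tuple
+
+from prefect import flow
+
+
+@flow
+def build_report(
+    page_size: Tuple[int, int] = (1920, 1080),  # tuple-type parameter
+    sections: List[str] = ["intro", "summary"],  # list parameter, now editable as a list
+):
+    print(f"Building a {page_size[0]}x{page_size[1]} report with sections {sections}")
+```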
+
+Deployment parameters UI screenshot
+
+See the following PRs for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/12168
+- https://github.com/PrefectHQ/prefect/pull/12179
+- https://github.com/PrefectHQ/prefect/pull/12186
+- https://github.com/PrefectHQ/prefect/pull/12187
+- https://github.com/PrefectHQ/prefect/pull/12182
+- https://github.com/PrefectHQ/prefect/pull/12219
+
+### Enhancements
+
+- Add `on_running` state change hook — https://github.com/PrefectHQ/prefect/pull/12153
+- Add flow run state data to flow run graph API — https://github.com/PrefectHQ/prefect/pull/12130
+- Add schedules shorthand support to `Deployment.build_from_flow` — https://github.com/PrefectHQ/prefect/pull/12181
+- Send flow run and task run retry logs to API — https://github.com/PrefectHQ/prefect/pull/12211
+- Add the flow run graph states layer and handle selection — https://github.com/PrefectHQ/prefect/pull/12166
+- Add a paused deployment filter and update `is_schedule_active` filter — https://github.com/PrefectHQ/prefect/pull/12202
+- Update client-side schemas for automation triggers to reflect updates in Prefect Cloud — https://github.com/PrefectHQ/prefect/pull/12205
+
+### Fixes
+
+- Address two memory leaks in concurrency management — https://github.com/PrefectHQ/prefect/pull/12141
+
+### Experimental
+
+- Add Job Variables tab to the Flow Runs page — https://github.com/PrefectHQ/prefect/pull/12206
+- Add support for calling `.map` for autonomous task runs — https://github.com/PrefectHQ/prefect/pull/12171
+- Simplify already-authenticated logic for `prefect cloud login` — https://github.com/PrefectHQ/prefect/pull/12209
+- Add better parity with Prefect Cloud for task scheduling protocol - https://github.com/PrefectHQ/prefect/pull/12212
+- Allow creating autonomous task runs via `__call__` — https://github.com/PrefectHQ/prefect/pull/12158
+
+### Integrations
+
+- Fix VPC Connector usage in Cloud Run Worker v2 - https://github.com/PrefectHQ/prefect-gcp/pull/252
+
+### Documentation
+
+- Add advanced example to CI/CD guide — https://github.com/PrefectHQ/prefect/pull/12188
+- Add keyword for search to deployments concept doc — https://github.com/PrefectHQ/prefect/pull/12178
+- Add a `prefect-client` README — https://github.com/PrefectHQ/prefect/pull/12172
+- Add `prefect-soda-cloud` integration — https://github.com/PrefectHQ/prefect/pull/12208
+- Update Prefect self-hosted guide to clarify PostgreSQL Docker instructions — https://github.com/PrefectHQ/prefect/pull/12164
+- Update README example — https://github.com/PrefectHQ/prefect/pull/12167
+- Remove outdated sqlite version info from install docs — https://github.com/PrefectHQ/prefect/pull/12162
+
+### Contributors
+
+- @AlessandroLollo
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.2...2.16.3
+
+## Release 2.16.2
+
+### Enhancements
+
+- Add ability to use a module path entrypoint when using `.serve` or `.deploy` — https://github.com/PrefectHQ/prefect/pull/12134
+- Add `delete_task_run` client method — https://github.com/PrefectHQ/prefect/pull/12142
+- Add Artifacts on the flow run graph — https://github.com/PrefectHQ/prefect/pull/12156
+
+### Fixes
+
+- Support obfuscation of more complex log record messages — https://github.com/PrefectHQ/prefect/pull/12151
+
+### Documentation
+
+- Remove tab structure for three docs pages to improve navigation — https://github.com/PrefectHQ/prefect/pull/12127
+- Add clarifications and style updates on the events page — https://github.com/PrefectHQ/prefect/pull/12133
+
+### Experimental
+
+- Try to use the same block for autonomous task scheduling storage — https://github.com/PrefectHQ/prefect/pull/12122
+- Reliability improvements for autonomous task scheduling — https://github.com/PrefectHQ/prefect/pull/12115
+
+### Contributors
+
+- @eladm26
+- @seano-vs
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.1...2.16.2
+
+## Release 2.16.1
+
+### Enhanced multiple schedule support
+
+`prefect.yaml` now supports specifying multiple schedules via the `schedules` key. This allows you to define multiple schedules for a single deployment, and each schedule can have its own `cron`, `interval`, or `rrule` configuration:
+
+```yaml
+  ...
+  schedules:
+    - cron: "0 0 * * *"
+      active: false
+    - interval: 3600
+      active: true
+    - rrule: "FREQ=YEARLY"
+      active: true
+```
+
+In addition, you can now specify multiple schedules via arguments to `prefect deploy`:
+
+`prefect deploy ... --cron '4 * * * *' --cron '1 * * * *' --rrule 'FREQ=DAILY'`
+
+We've also added support for multiple schedules to `flow.serve`, `flow.deploy` and `prefect.runner.serve`. You can provide multiple schedules by passing a list to the `cron`, `interval`, or `rrule` arguments:
+
+```python
+import datetime
+import random
+
+from prefect import flow
+
+
+@flow
+def trees():
+    tree = random.choice(["🌳", "🌴", "🌲", "🌵"])
+    print(f"Here's a happy little tree: {tree}")
+
+if __name__ == "__main__":
+    trees.serve(
+        name="trees",
+        interval=[3600, 7200, 14400],
+    )
+```
+
+This will create a deployment with three schedules: one that runs every hour, one that runs every two hours, and one that runs every four hours. For more advanced cases, use the `schedules` argument:
+
+```python
+    # assumed import locations as of Prefect 2.16 (not shown in the original snippet):
+    # from prefect.client.schemas.schedules import CronSchedule, IntervalSchedule, RRuleSchedule
+    # from prefect.deployments.schedules import MinimalDeploymentSchedule
+    trees.serve(
+        name="trees",
+        schedules=[
+            IntervalSchedule(interval=datetime.timedelta(minutes=30)),
+            {"schedule": RRuleSchedule(rrule="FREQ=YEARLY"), "active": True},
+            MinimalDeploymentSchedule(schedule=CronSchedule(cron="0 0 * * *"), active=False),
+        ]
+    )
+```
+
+Dive into these new scheduling capabilities today and streamline your workflows like never before.
+
+For implementation details, see the following pull request: https://github.com/PrefectHQ/prefect/pull/12107
+
+### Enhancements
+
+- Add a logging filter to prevent logging the current API key — https://github.com/PrefectHQ/prefect/pull/12072
+- Update `flow.serve` to support multiple schedules — https://github.com/PrefectHQ/prefect/pull/12107
+- Update `prefect deploy` to support multiple schedules — https://github.com/PrefectHQ/prefect/pull/12121
+
+### Fixes
+
+- Clear runs when updating or deleting schedules, even if the deployment is paused — https://github.com/PrefectHQ/prefect/pull/12089
+- Surface missing work pool error in CLI — https://github.com/PrefectHQ/prefect/pull/12087
+- Ignore outdated `schedule` in `Deployment.build_from_flow` — https://github.com/PrefectHQ/prefect/pull/12100
+- Fix schedule instructions for `prefect deploy` — https://github.com/PrefectHQ/prefect/pull/12101
+- Fix reference to `prefect deployment schedule create` — https://github.com/PrefectHQ/prefect/pull/12117
+- Ensure only scheduled runs can be marked late — https://github.com/PrefectHQ/prefect/pull/12113
+
+### Documentation
+
+- Update outdated automations concepts page image — https://github.com/PrefectHQ/prefect/pull/12059
+- Update automations concept page for recent triggers and actions — https://github.com/PrefectHQ/prefect/pull/12074
+- Add clarifications to tutorial and getting started pages — https://github.com/PrefectHQ/prefect/pull/12077
+- Add minimum Kubernetes version to worker guide — https://github.com/PrefectHQ/prefect/pull/12095
+- Add Coiled to integrations catalog docs page — https://github.com/PrefectHQ/prefect/pull/12098
+- Fix formatting on webhooks page — https://github.com/PrefectHQ/prefect/pull/12088
+
+### Experimental
+
+- Add artifact data to flow run graph API — https://github.com/PrefectHQ/prefect/pull/12105
+- Add feature flag for flow run infra overrides — https://github.com/PrefectHQ/prefect/pull/12065
+
+## New Contributors
+
+- @jrbourbeau made their first contribution in https://github.com/PrefectHQ/prefect/pull/12098
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.16.0...2.16.1
+
+## Release 2.16.0
+
+### 🕢 Deployments now support multiple schedules 🕐
+
+With today’s release, we’re excited to roll out initial support for using multiple schedules with Deployments! You can now use multiple schedules in the following ways:
+
+- Specifying schedules in a Deployment YAML file
+- Creating Python-based Deployments with the `Deployment` class
+- New CLI commands: `prefect deployment schedule <command>`
+- New UI components aware of multiple schedules
+
+Coming soon, we’ll round out support for multiple schedules in other areas, such as:
+
+- When running a flow with `flow.serve()` and `flow.deploy()`
+- When using `prefect deploy`
+
+The easiest way to get started with multiple schedules is to try out the new CLI commands:
+
+```shell
+$ prefect deployment schedule ls happy-flow/my-deployment
+                      Deployment Schedules
+┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┳━━━━━━━━┓
+┃ ID                                   ┃ Schedule        ┃ Active ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━╇━━━━━━━━┩
+│ c7d3ddc4-9a5a-4dec-bd59-eed282ae55d5 │ cron: 0 0 1 * 1 │ True   │
+└──────────────────────────────────────┴─────────────────┴────────┘
+
+$ prefect deployment schedule create happy-flow/my-deployment --interval 60
+Created deployment schedule!
+
+$ prefect deployment schedule ls happy-flow/my-deployment
+                       Deployment Schedules
+┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━┓
+┃ ID                                   ┃ Schedule           ┃ Active ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━┩
+│ 3638ed58-cab2-4462-a680-2f92fcf6c797 │ interval: 0:01:00s │ True   │
+│ c7d3ddc4-9a5a-4dec-bd59-eed282ae55d5 │ cron: 0 0 1 * 1    │ True   │
+└──────────────────────────────────────┴────────────────────┴────────┘
+```
+
+### Enhancements
+
+- Add support for deploying to a process work pool using `flow.deploy` and `deploy` — https://github.com/PrefectHQ/prefect/pull/12017
+- Add support for multiple schedules to Prefect server and CLI — https://github.com/PrefectHQ/prefect/pull/11971
+- Add CLI command to read runs in a work queue — https://github.com/PrefectHQ/prefect/pull/11989
+
+### Fixes
+
+- Update the text for the CLI command `deployment run --help` so it renders for more args — https://github.com/PrefectHQ/prefect/pull/11960
+- Fix `Flow.with_options` logic for `retries`, `retry_delay_seconds`, and `flow_run_name` — https://github.com/PrefectHQ/prefect/pull/12020
+- Fix memory leaks related to cancellation scopes and async contextvar usage — https://github.com/PrefectHQ/prefect/pull/12019
+- Revert the recent change that ran task runs on the main thread while we investigate a concurrency issue — https://github.com/PrefectHQ/prefect/pull/12054
+- Add a more readable error if Docker is not running — https://github.com/PrefectHQ/prefect/pull/12045
+
+### Documentation
+
+- Improve language and formatting in Profiles and Configuration guide — https://github.com/PrefectHQ/prefect/pull/11996
+- Improve docs formatting consistency and add some minor content updates — https://github.com/PrefectHQ/prefect/pull/12004
+- Update formatting for guide: creating-interactive-workflows.md — https://github.com/PrefectHQ/prefect/pull/11991
+- Add import statement for `wait_for_flow_run` — https://github.com/PrefectHQ/prefect/pull/11999
+- Add deep dive on overriding job variables — https://github.com/PrefectHQ/prefect/pull/12033
+- Remove extraneous trailing quotation marks in concepts/artifacts docs — https://github.com/PrefectHQ/prefect/pull/12040
+- Add links to overriding job variables guide — https://github.com/PrefectHQ/prefect/pull/12043
+- Update scheduling docs to include information about multiple schedules — https://github.com/PrefectHQ/prefect/pull/12064
+
+### Experimental
+
+- Only allow using `Task.submit()` for autonomous task submission — https://github.com/PrefectHQ/prefect/pull/12025
+
+## New Contributors
+
+- @hamzamogni made their first contribution in https://github.com/PrefectHQ/prefect/pull/12000
+- @eladm26 made their first contribution in https://github.com/PrefectHQ/prefect/pull/12045
+
+### Contributors
+
+- @NodeJSmith
+- @eladm26
+- @hamzamogni
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.15.0...2.16.0
+
+## Release 2.15.0
+
+### 🔧 Task runs now execute on the main thread
+
+We are excited to announce that task runs are now executed on the main thread!
+
+When feasible, task runs are now executed on the main thread instead of a worker thread. Previously, all task runs were run in a new worker thread. This allows objects to be passed to and from tasks without worrying about thread safety unless you have opted into concurrency. For example, an HTTP client or database connection can now be shared between a flow and its tasks (unless synchronous concurrency is used). Some asynchronous and sequential use cases may see performance improvements.
+
+Consider the following example:
+
+```python
+import sqlite3
+from prefect import flow, task
+
+db = sqlite3.connect("threads.db")
+
+try:
+    db.execute("CREATE TABLE fellowship(name)")
+except sqlite3.OperationalError:
+    pass
+else:
+    db.commit()
+
+db.execute("DELETE FROM fellowship")
+db.commit()
+
+cur = db.cursor()
+
+
+@task
+def my_task(name: str):
+    global db, cur
+
+    cur.execute('INSERT INTO fellowship VALUES (?)', (name,))
+
+    db.commit()
+
+
+@flow
+def my_flow():
+    global db, cur
+
+    for name in ["Frodo", "Gandalf", "Gimli", "Aragorn", "Legolas", "Boromir", "Samwise", "Pippin", "Merry"]:
+        my_task(name)
+
+    print(cur.execute("SELECT * FROM fellowship").fetchall())
+
+    db.close()
+
+
+if __name__ == "__main__":
+    my_flow()
+```
+
+In previous versions of Prefect, running this example would result in an error like this:
+
+```python
+sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 7977619456 and this is thread id 6243151872.
+```
+
+But now, with task runs executing on the main thread, this example will run without error! We're excited this change makes Prefect even more intuitive and flexible!
+
+See the following pull request for implementation details: https://github.com/PrefectHQ/prefect/pull/11930
+
+### 🔭 Monitor deployment runs triggered via the CLI
+
+You can monitor the status of a flow run created from a deployment via the CLI. This is useful for observing a flow run's progress without navigating to the UI.
+
+To monitor a flow run started from a deployment, use the `--watch` option with `prefect deployment run`:
+
+```console
+prefect deployment run --watch <flow_name>/<deployment_name>
+```
+
+See the following pull request for implementation details: https://github.com/PrefectHQ/prefect/pull/11702
+
+### Enhancements
+
+- Enable work queue status in the UI by default — https://github.com/PrefectHQ/prefect/pull/11976 & https://github.com/PrefectHQ/prefect-ui-library/pull/2080
+
+### Fixes
+
+- Update vendored `starlette` version to resolve vulnerability in `python-multipart` — https://github.com/PrefectHQ/prefect/pull/11956
+- Fix display of interval schedules created with a different timezone than the current device - https://github.com/PrefectHQ/prefect-ui-library/pull/2090
+
+### Experimental
+
+- Prevent `RUNNING` -> `RUNNING` state transitions for autonomous task runs — https://github.com/PrefectHQ/prefect/pull/11975
+- Provide current thread to the engine when submitting autonomous tasks — https://github.com/PrefectHQ/prefect/pull/11978
+- Add intermediate `PENDING` state for autonomous task execution — https://github.com/PrefectHQ/prefect/pull/11985
+- Raise exception when stopping task server — https://github.com/PrefectHQ/prefect/pull/11928
+
+### Documentation
+
+- Update work pools concepts page to include Modal push work pool — https://github.com/PrefectHQ/prefect/pull/11954
+- Add details to `run_deployment` tags parameter documentation — https://github.com/PrefectHQ/prefect/pull/11955
+- Add Helm chart link in Prefect server instance docs — https://github.com/PrefectHQ/prefect/pull/11970
+- Clarify that async nested flows can be run concurrently — https://github.com/PrefectHQ/prefect/pull/11982
+- Update work queue and flow concurrency information to include push work pools — https://github.com/PrefectHQ/prefect/pull/11974
+
+### Contributors
+
+- @zanieb
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.21...2.15.0
+
+## Release 2.14.21
+
+### Introducing work queue status
+
+We're excited to unveil the new status indicators for work queues in Prefect's UI, enhancing your ability to oversee and control flow run execution within our hybrid work pools.
+
+Work queues will now display one of three distinct statuses:
+
+- `Ready` - one or more online workers are actively polling the work queue
+- `Not Ready` - no online workers are polling the work queue, signaling a need for intervention
+- `Paused` - the work queue is intentionally paused, preventing execution
+
+Prefect dashboard snapshot
+
+Work pools page work queues table showing work queues of all statuses
+
+With the introduction of work queue status, you'll notice the absence of deprecated work queue health indicators in the UI.
+
+See the documentation on [work queue status](https://docs.prefect.io/latest/concepts/work-pools/#work-queues) for more information.
+
+For now, this is an experimental feature, and can be enabled by running:
+
+```console
+prefect config set PREFECT_EXPERIMENTAL_ENABLE_WORK_QUEUE_STATUS=True
+```
+
+See the following pull request for implementation details: https://github.com/PrefectHQ/prefect/pull/11829
+
+### Fixes
+
+- Remove unnecessary `WARNING` level log indicating a task run completed successfully — https://github.com/PrefectHQ/prefect/pull/11810
+- Fix a bug where block placeholders declared in pull steps of the `deployments` section of a `prefect.yaml` file were not resolved correctly — https://github.com/PrefectHQ/prefect/pull/11740
+- Use `pool_pre_ping` to improve stability for long-lived PostgreSQL connections — https://github.com/PrefectHQ/prefect/pull/11911
+
+### Documentation
+
+- Clarify Docker tutorial code snippet to ensure commands are run from the correct directory — https://github.com/PrefectHQ/prefect/pull/11833
+- Remove beta tag from incident documentation and screenshots — https://github.com/PrefectHQ/prefect/pull/11921
+- Update Prefect Cloud account roles docs to reflect renaming of previous "Admin" role to "Owner" and creation of new "Admin" role that cannot bypass SSO — https://github.com/PrefectHQ/prefect/pull/11925
+
+### Experimental
+
+- Ensure task subscribers can only pick up task runs they are able to execute — https://github.com/PrefectHQ/prefect/pull/11805
+- Allow a task server to reuse the same task runner to speed up execution — https://github.com/PrefectHQ/prefect/pull/11806
+- Allow configuration of maximum backlog queue size and maximum retry queue size for autonomous task runs — https://github.com/PrefectHQ/prefect/pull/11825
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.20...2.14.21
+
+## Release 2.14.20
+
+### Fixes
+
+- Fix runtime bug causing missing work queues in UI — https://github.com/PrefectHQ/prefect/pull/11807
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.19...2.14.20
+
+## Release 2.14.19
+
+## Dynamic descriptions for paused and suspended flow runs
+
+You can now include dynamic, markdown-formatted descriptions when pausing or suspending a flow run for human input. This description will be shown in the Prefect UI alongside the form when a user is resuming the flow run, enabling developers to give context and instructions to users when they need to provide input.
+
+```python
+from datetime import datetime
+from prefect import flow, pause_flow_run, get_run_logger
+from prefect.input import RunInput
+
+class UserInput(RunInput):
+    name: str
+    age: int
+
+@flow
+async def greet_user():
+    logger = get_run_logger()
+    current_date = datetime.now().strftime("%B %d, %Y")
+
+    description_md = f"""
+**Welcome to the User Greeting Flow!**
+Today's Date: {current_date}
+
+Please enter your details below:
+- **Name**: What should we call you?
+- **Age**: Just a number, nothing more.
+"""
+
+    user_input = await pause_flow_run(
+        wait_for_input=UserInput.with_initial_data(
+            description=description_md, name="anonymous"
+        )
+    )
+
+    if user_input.name == "anonymous":
+        logger.info("Hello, stranger!")
+    else:
+        logger.info(f"Hello, {user_input.name}!")
+```
+
+See the following PRs for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11776
+- https://github.com/PrefectHQ/prefect/pull/11799
+
+### Enhancements
+
+- Enhanced `RunInput` saving to include descriptions, improving clarity and documentation for flow inputs — https://github.com/PrefectHQ/prefect/pull/11776
+- Improved type hinting for automatic run inputs, enhancing the developer experience and code readability — https://github.com/PrefectHQ/prefect/pull/11796
+- Extended Azure filesystem support with the addition of `azure_storage_container` for more flexible storage options — https://github.com/PrefectHQ/prefect/pull/11784
+- Added deployment details to work pool information, offering a more comprehensive view of work pool usage — https://github.com/PrefectHQ/prefect/pull/11766
+
+### Fixes
+
+- Updated terminal-based deployment operations to make links within panels interactive, enhancing user navigation and experience — https://github.com/PrefectHQ/prefect/pull/11774
+
+### Documentation
+
+- Revised Key-Value (KV) integration documentation for improved clarity and updated authorship details — https://github.com/PrefectHQ/prefect/pull/11770
+- Further refinements to interactive flows documentation, addressing feedback and clarifying usage — https://github.com/PrefectHQ/prefect/pull/11772
+- Standardized terminal output in documentation for consistency and readability — https://github.com/PrefectHQ/prefect/pull/11775
+- Corrected a broken link to agents in the work pool concepts documentation, improving resource accessibility — https://github.com/PrefectHQ/prefect/pull/11782
+- Updated examples for accuracy and to reflect current best practices — https://github.com/PrefectHQ/prefect/pull/11786
+- Added guidance on providing descriptions when pausing flow runs, enhancing operational documentation — https://github.com/PrefectHQ/prefect/pull/11799
+
+### Experimental
+
+- Implemented `TaskRunFilterFlowRunId` for both client and server, enhancing task run filtering capabilities — https://github.com/PrefectHQ/prefect/pull/11748
+- Introduced a subscription API for autonomous task scheduling, paving the way for more dynamic and flexible task execution — https://github.com/PrefectHQ/prefect/pull/11779
+- Conducted testing to ensure server-side scheduling of autonomous tasks, verifying system reliability and performance — https://github.com/PrefectHQ/prefect/pull/11793
+- Implemented a global collections metadata cache clearance between tests, improving test reliability and accuracy — https://github.com/PrefectHQ/prefect/pull/11794
+- Initiated task server testing, laying the groundwork for comprehensive server-side task management — https://github.com/PrefectHQ/prefect/pull/11797
+
+## New Contributors
+
+- @thomasfrederikhoeck made their first contribution in https://github.com/PrefectHQ/prefect/pull/11784
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.18...2.14.19
+
+## Release 2.14.18
+
+### Fixes
+
+- Allow prefect settings to accept lists — https://github.com/PrefectHQ/prefect/pull/11722
+- Revert deprecation of worker webserver setting — https://github.com/PrefectHQ/prefect/pull/11758
+
+### Documentation
+
+- Expand docs on interactive flows, detailing `send_input` and `receive_input` — https://github.com/PrefectHQ/prefect/pull/11724
+- Clarify that interval schedules use an anchor date, not a start date — https://github.com/PrefectHQ/prefect/pull/11767
+
+## New Contributors
+
+- @clefelhocz2 made their first contribution in https://github.com/PrefectHQ/prefect/pull/11722
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.17...2.14.18
+
+## Release 2.14.17
+
+### **Experimental**: Non-blocking submission of flow runs to the `Runner` web server
+
+You can now submit runs of served flows without blocking the main thread, from inside or outside a flow run. If submitting flows from inside a parent flow, these submitted runs will be tracked as subflows of the parent flow run.
+
+Prefect flow run graph screenshot
+
+In order to use this feature, you must:
+
+- enable the experimental `Runner` webserver endpoints via
+  ```console
+  prefect config set PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS=True
+  ```
+- ensure the `Runner` web server is enabled, either by:
+  - passing `webserver=True` to your `serve` call
+  - enabling the webserver via
+    ```console
+    prefect config set PREFECT_RUNNER_SERVER_ENABLE=True
+    ```
+
+You can then submit any flow available in the import space of the served flow, and you can submit multiple runs at once. If submitting flows from a parent flow, you may optionally block the parent flow run from completing until all submitted runs are complete with `wait_for_submitted_runs()`.
+
+For example:
+
+```python
+import time
+
+from pydantic import BaseModel
+
+from prefect import flow, serve, task
+from prefect.runner import submit_to_runner, wait_for_submitted_runs
+
+
+class Foo(BaseModel):
+    bar: str
+    baz: int
+
+
+class ParentFoo(BaseModel):
+    foo: Foo
+    x: int = 42
+
+@task
+def noop():
+    pass
+
+@flow(log_prints=True)
+async def child(foo: Foo = Foo(bar="hello", baz=42)):
+    print(f"received {foo.bar} and {foo.baz}")
+    print("going to sleep")
+    noop()
+    time.sleep(20)
+
+
+@task
+def foo():
+    time.sleep(2)
+
+@flow(log_prints=True)
+def parent(parent_foo: ParentFoo = ParentFoo(foo=Foo(bar="hello", baz=42))):
+    print(f"I'm a parent and I received {parent_foo=}")
+
+    submit_to_runner(
+        child, [{"foo": Foo(bar="hello", baz=i)} for i in range(9)]
+    )
+
+    foo.submit()
+
+    wait_for_submitted_runs()  # optionally block until all submitted runs are complete
+
+
+if __name__ == "__main__":
+    # either enable the webserver via `webserver=True` or via
+    # `prefect config set PREFECT_RUNNER_SERVER_ENABLE=True`
+    serve(parent.to_deployment(__file__), limit=10, webserver=True)
+```
+
+This feature is experimental and subject to change. Please try it out and let us know what you think!
+
+See [the PR](https://github.com/PrefectHQ/prefect/pull/11476) for implementation details.
+
+### Enhancements
+
+- Add `url` to `prefect.runtime.flow_run` — https://github.com/PrefectHQ/prefect/pull/11686
+- Add ability to subpath the `/ui-settings` endpoint — https://github.com/PrefectHQ/prefect/pull/11701
+
+### Fixes
+
+- Handle `pydantic` v2 types in schema generation for flow parameters — https://github.com/PrefectHQ/prefect/pull/11656
+- Increase flow run resiliency by gracefully handling `PENDING` to `PENDING` state transitions — https://github.com/PrefectHQ/prefect/pull/11695
+
+### Documentation
+
+- Add documentation for `cache_result_in_memory` argument for `flow` decorator — https://github.com/PrefectHQ/prefect/pull/11669
+- Add runnable example of `flow.from_source()` — https://github.com/PrefectHQ/prefect/pull/11690
+- Improve discoverability of creating interactive workflows guide — https://github.com/PrefectHQ/prefect/pull/11704
+- Fix typo in automations guide — https://github.com/PrefectHQ/prefect/pull/11716
+- Remove events and incidents from concepts index page — https://github.com/PrefectHQ/prefect/pull/11708
+- Remove subflow task tag concurrency warning — https://github.com/PrefectHQ/prefect/pull/11725
+- Remove misleading line on pausing a flow run from the UI — https://github.com/PrefectHQ/prefect/pull/11730
+- Improve readability of Jinja templating guide in automations concept doc — https://github.com/PrefectHQ/prefect/pull/11729
+- Resolve links to relocated interactive workflows guide — https://github.com/PrefectHQ/prefect/pull/11692
+- Fix typo in flows concept documentation — https://github.com/PrefectHQ/prefect/pull/11693
+
+### Contributors
+
+- @sgbaird
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.16...2.14.17
+
+## Release 2.14.16
+
+### Support for accessing block fields in `prefect.yaml` templating
+
+You can now access fields on blocks used in your `prefect.yaml` files. This enables you to use values stored in blocks to provide dynamic configuration for attributes like your `work_pool_name` and `job_variables`.
+
+Here's what it looks like in action:
+
+```yaml
+deployments:
+  - name: test
+    version: 0.1
+    tags: []
+    description: "Example flow"
+    schedule: {}
+    entrypoint: "flow.py:example_flow"
+    parameters: {}
+    work_pool:
+      name: "{{ prefect.blocks.json.default-config.value.work_pool }}"
+      work_queue: "{{ prefect.blocks.json.default-config.value.work_queue }}"
+```
+
+In the above example, we use fields from a `JSON` block to configure which work pool and queue we deploy our flow to. We can update where our flow is deployed by updating the referenced block, without needing to change our `prefect.yaml` at all!
+
+Many thanks to @bjarneschroeder for contributing this functionality!
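+
+For reference, here is one way the `default-config` block referenced above could be created; this is a minimal sketch, and the stored pool and queue names are illustrative:
+
+```python
+from prefect.blocks.system import JSON
+
+# illustrative values; the template above reads them via
+# prefect.blocks.json.default-config.value.work_pool / .work_queue
+JSON(value={"work_pool": "my-pool", "work_queue": "default"}).save("default-config")
+```
+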
+
+### Enhancements
+
+- Add the `wait_for_flow_run` method to `PrefectClient` to allow waiting for a flow run to complete — https://github.com/PrefectHQ/prefect/pull/11305
+- Add a provisioner for `Modal` push work pools — https://github.com/PrefectHQ/prefect/pull/11665
+- Expose the `limit` kwarg in `serve` to increase its visibility — https://github.com/PrefectHQ/prefect/pull/11645
+- Add methods supporting modification and suppression of flow run notification policies — https://github.com/PrefectHQ/prefect/pull/11163
+- Enhance sending and receiving of flow run inputs by automatically converting types to `RunInput` subclasses — https://github.com/PrefectHQ/prefect/pull/11636
+
+### Fixes
+
+- Avoid rerunning task runs forced to `COMPLETED` state — https://github.com/PrefectHQ/prefect/pull/11385
+- Add a new UI setting to customize the served static directory — https://github.com/PrefectHQ/prefect/pull/11648
+
+### Documentation
+
+- Fix retry handler example code in task concept docs — https://github.com/PrefectHQ/prefect/pull/11633
+- Fix docstring example in `from_source` — https://github.com/PrefectHQ/prefect/pull/11634
+- Add an active incident screenshot to the documentation — https://github.com/PrefectHQ/prefect/pull/11647
+- Add clarification on work queues being a feature of hybrid work pools only — https://github.com/PrefectHQ/prefect/pull/11651
+- Update interactive workflow guide description and heading — https://github.com/PrefectHQ/prefect/pull/11663
+- Add API reference documentation for `wait_for_flow_run` — https://github.com/PrefectHQ/prefect/pull/11668
+- Remove duplicate line in `prefect deploy` docs — https://github.com/PrefectHQ/prefect/pull/11644
+- Update README to clearly mention running the Python file before starting the server — https://github.com/PrefectHQ/prefect/pull/11643
+- Fix typo in `Modal` infrastructure documentation — https://github.com/PrefectHQ/prefect/pull/11676
+
+### New Contributors
+
+- @N-Demir made their first contribution in https://github.com/PrefectHQ/prefect/pull/11633
+- @sgbaird made their first contribution in https://github.com/PrefectHQ/prefect/pull/11644
+- @bjarneschroeder made their first contribution in https://github.com/PrefectHQ/prefect/pull/10938
+- @Fizzizist made their first contribution in https://github.com/PrefectHQ/prefect/pull/11305
+- @NeodarZ made their first contribution in https://github.com/PrefectHQ/prefect/pull/11163
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.15...2.14.16
+
+## Release 2.14.15
+
+### Fixes
+
+- Fix an issue where setting `UI_SERVE_BASE` to an empty string or "/" led to incorrect asset URLs - https://github.com/PrefectHQ/prefect/pull/11628
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.14...2.14.15
+
+## Release 2.14.14
+
+### Support for custom `prefect.yaml` deployment configuration files
+
+You can now specify a `prefect.yaml` deployment configuration file while running `prefect deploy` by using the
+`--prefect-file` command line argument. This means that your configuration files can live in any directory
+and follow your own naming conventions, giving you more flexibility in defining and managing your deployments.
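+
+For example, assuming a configuration file saved at an illustrative path like `deploy/staging.yaml`:
+
+```bash
+# Deploy using a custom-named configuration file instead of ./prefect.yaml
+# (the path below is an illustrative placeholder)
+prefect deploy --prefect-file deploy/staging.yaml
+```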
+
+See the following PR for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11511
+- https://github.com/PrefectHQ/prefect/pull/11624
+
+### Toggle Deployment Schedule Status via `prefect.yaml`
+
+You can now toggle your deployment schedules between `active` and `inactive` in your `prefect.yaml` configuration file. This enables you to create deployments with initially _inactive_ schedules, allowing for thorough testing or staged rollouts!
+
+See the following PR for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11608
+
+### Support for Python 3.12
+
+You can now install `prefect` using Python 3.12! This support is experimental and will be hardened in future releases.
+
+See the following PR for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11306
+
+### Enhancements
+
+- Add an option through the CLI and Python client to remove schedules from deployments — https://github.com/PrefectHQ/prefect/pull/11353
+- Add client methods to interact with global concurrency limit APIs — https://github.com/PrefectHQ/prefect/pull/11415
+- Make `name` optional when saving an existing block — https://github.com/PrefectHQ/prefect/pull/11592
+- Make marking a flow as a subflow in `run_deployment` optional — https://github.com/PrefectHQ/prefect/pull/11611
+- Improve IDE support for `PrefectObjectRegistry.register_instances` decorated classes — https://github.com/PrefectHQ/prefect/pull/11617
+- Make the UI accessible via reverse proxy and add a `--no-install` flag to `prefect dev build-ui` — https://github.com/PrefectHQ/prefect/pull/11489
+- Improve UI build during `prefect server start` - https://github.com/PrefectHQ/prefect/pull/11493
+- Improve error message in `.deploy` — https://github.com/PrefectHQ/prefect/pull/11615
+
+### Fixes
+
+- Use default values (if any) when no run input is provided upon `resume` — https://github.com/PrefectHQ/prefect/pull/11598
+- Prevent deployments with `RRule` schedules containing `COUNT` — https://github.com/PrefectHQ/prefect/pull/11600
+- Fix flows with class-based type hints based on `from __future__ import annotations` — https://github.com/PrefectHQ/prefect/pull/11578 & https://github.com/PrefectHQ/prefect/pull/11616
+- Raise `StepExecutionError` on non-zero `run_shell_script` return code during `prefect deploy` — https://github.com/PrefectHQ/prefect/pull/11604
+
+### Experimental
+
+- Enable flow runs to receive typed input from external sources — https://github.com/PrefectHQ/prefect/pull/11573
+
+### Documentation
+
+- Fix non-rendering link in Docker guide — https://github.com/PrefectHQ/prefect/pull/11574
+- Update deployment and flow concept docs — https://github.com/PrefectHQ/prefect/pull/11576
+- Add examples for custom triggers to automations docs — https://github.com/PrefectHQ/prefect/pull/11589
+- Add send/receive documentation to `run_input` module docstring — https://github.com/PrefectHQ/prefect/pull/11591
+- Add automations guide — https://github.com/PrefectHQ/prefect/pull/10559
+- Fix storage guide links and reference — https://github.com/PrefectHQ/prefect/pull/11602
+- Fix typo in `prefect deploy` guide — https://github.com/PrefectHQ/prefect/pull/11606
+- Fix imports in human-in-the-loop workflows guide example — https://github.com/PrefectHQ/prefect/pull/11612
+- Add missing imports to human-in-the-loop workflows example — https://github.com/PrefectHQ/prefect/pull/11614
+- Fix formatting in `prefect deploy` guide — https://github.com/PrefectHQ/prefect/pull/11562
+- Remove "Notification blocks must be pre-configured" warning from automations docs — https://github.com/PrefectHQ/prefect/pull/11569
+- Update work pools concept docs example to use correct entrypoint — https://github.com/PrefectHQ/prefect/pull/11584
+- Add incident, metric, and deployment status info to automations docs - https://github.com/PrefectHQ/prefect/pull/11625
+
+### New Contributors
+
+- @brett-koonce made their first contribution in https://github.com/PrefectHQ/prefect/pull/11562
+- @jitvimol made their first contribution in https://github.com/PrefectHQ/prefect/pull/11584
+- @oz-elhassid made their first contribution in https://github.com/PrefectHQ/prefect/pull/11353
+- @Zyntogz made their first contribution in https://github.com/PrefectHQ/prefect/pull/11415
+- @Andrew-S-Rosen made their first contribution in https://github.com/PrefectHQ/prefect/pull/11578
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.13...2.14.14
+
+## Release 2.14.13
+
+### Access default work pool configurations in an air-gapped environment
+
+Those who run Prefect server in an environment where arbitrary outbound internet traffic is not allowed were previously unable to retrieve up-to-date default work pool configurations (via the UI or otherwise). Your server can now obtain the worker metadata needed to populate these default work pool configurations, even in an air-gapped environment. Upon each release of `prefect`, the most recent version of this worker metadata will be embedded in the `prefect` package so that it can be used as a fallback if the outbound call to retrieve the real-time metadata fails.
+
+See the following PR for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11503
+
+### Introducing conditional task retries for enhanced workflow control
+
+In this release, we're excited to introduce the ability to conditionally retry tasks by passing a callable to the `retry_condition_fn` argument of your task decorator, enabling more nuanced and flexible retry behavior. This adds a significant level of control and efficiency, particularly in handling complex or unpredictable task outcomes. For more information on usage, check out our [docs](https://github.com/PrefectHQ/prefect/pull/11535)!
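+
+As a quick illustration, here's a minimal sketch of a conditional retry. The callable receives the task, the task run, and the final state, and returns `True` to retry; the HTTP-based retry condition below is an assumption chosen for illustration:
+
+```python
+import httpx
+
+from prefect import task
+
+
+def retry_on_server_errors(task, task_run, state) -> bool:
+    """Decide whether to retry: only on 5xx HTTP errors (illustrative condition)."""
+    try:
+        state.result()  # re-raises the exception that failed the task
+    except httpx.HTTPStatusError as exc:
+        # Retry on server errors, but not on client errors like 404
+        return exc.response.status_code >= 500
+    except Exception:
+        # Any other failure: do not retry
+        return False
+    return False
+
+
+@task(retries=3, retry_condition_fn=retry_on_server_errors)
+def fetch(url: str) -> dict:
+    response = httpx.get(url)
+    response.raise_for_status()
+    return response.json()
+```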
+
+See the following PR for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11500
+
+### Enhancements
+
+- Add `prefect cloud open` to open the current workspace in the browser from the CLI — https://github.com/PrefectHQ/prefect/pull/11519
+- Implement `SendNotification` action type for programmatic Automations — https://github.com/PrefectHQ/prefect/pull/11471
+- Display work queue status details via CLI — https://github.com/PrefectHQ/prefect/pull/11545
+- Allow users to add date ranges "Around a time" when filtering by date - https://github.com/PrefectHQ/prefect-design/pull/1069
+
+### Fixes
+
+- Validate deployment name in `.deploy` — https://github.com/PrefectHQ/prefect/pull/11539
+- Ensure `flow.from_source` handles remote git repository updates — https://github.com/PrefectHQ/prefect/pull/11547
+
+### Documentation
+
+- Add documentation for Incidents feature in Prefect Cloud:
+  - https://github.com/PrefectHQ/prefect/pull/11504
+  - https://github.com/PrefectHQ/prefect/pull/11532
+  - https://github.com/PrefectHQ/prefect/pull/11506
+  - https://github.com/PrefectHQ/prefect/pull/11508
+- Add security README — https://github.com/PrefectHQ/prefect/pull/11520
+- Add conditional pause example to flow documentation — https://github.com/PrefectHQ/prefect/pull/11536
+- Add API modules to Python SDK docs — https://github.com/PrefectHQ/prefect/pull/11538
+- Update human-in-the-loop documentation — https://github.com/PrefectHQ/prefect/pull/11497
+- Improve formatting in quickstart and tutorial — https://github.com/PrefectHQ/prefect/pull/11502
+- Fix typo in quickstart — https://github.com/PrefectHQ/prefect/pull/11498
+- Fix broken link — https://github.com/PrefectHQ/prefect/pull/11507
+- Fix method name typo in tasks tutorial — https://github.com/PrefectHQ/prefect/pull/11523
+- Remove redundant word — https://github.com/PrefectHQ/prefect/pull/11528
+
+### Collections
+
+- Add `LambdaFunction` block to `prefect-aws` to easily configure and invoke AWS Lambda functions - https://github.com/PrefectHQ/prefect-aws/pull/355
+
+### Contributors
+
+- @yifanmai made their first contribution in https://github.com/PrefectHQ/prefect/pull/11523
+- @dominictarro
+- @ConstantinoSchillebeeckx
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.12...2.14.13
+
+## Release 2.14.12
+
+### Increased customization of date and time filters across the UI
+
+Building on the enhancements to the dashboard we made in last week's release, we've updated the flow runs page to support relative time spans such as "Past 7 days". These changes make it easier to quickly see what's recently occurred (e.g. "Past 1 hour") and what's coming up next (e.g. "Next 15 minutes"). You can also select and filter by specific date and time ranges.
+
+We have also updated saved filters on the flow runs page so you can save date ranges as part of a custom filter. For example, it's now possible to create a view of the past 6 hours of runs for a specific work pool!
+
+The Flows page uses the same updated date and time filters so you have more control over how you filter and view runs.
+
+View a demonstration here: [![short loom video demo](https://github.com/PrefectHQ/prefect/assets/42048900/4dc01ec0-0776-49b4-bbc4-a1472c612e4f)](https://www.loom.com/share/95113969257d4cffa48ad13f943f950f?sid=b20bc27c-0dc2-40be-a627-a2148942c427)
+
+See the following PRs for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11473
+- https://github.com/PrefectHQ/prefect/pull/11481
+
+### Get type-checked input from humans in the loop
+
+Human-in-the-loop flows just got an upgrade. You can now pause or suspend a flow and wait for type-checked input. To get started, declare the structure of the input data using a Pydantic model, and Prefect will render a form dynamically in the UI when a human resumes the flow. Form validation will ensure that the data conforms to your Pydantic model, and your flow will receive the input.
+
+Prefect's new `RunInput` class powers this experience. `RunInput` is a subclass of Pydantic's `BaseModel`. Here's an example of a `RunInput` that uses dates, literals, and nested Pydantic models to show you what's possible:
+
+```python
+import datetime
+from typing import Literal
+
+from pydantic import Field
+
+from prefect.input import RunInput
+
+# ShirtSize, ShippingAddress, BillingAddress, and SameAsShipping are
+# user-defined enums/models, omitted here for brevity.
+
+
+class Person(RunInput):
+    first_name: str
+    last_name: str
+    birthday: datetime.date
+    likes_tofu: bool
+    age: int = Field(gt=0, lt=150)
+    shirt_size: Literal[ShirtSize.SMALL, ShirtSize.MEDIUM, ShirtSize.LARGE,
+                        ShirtSize.XLARGE]
+    shirt_color: Literal["red", "blue", "green"]
+    preferred_delivery_time: datetime.datetime
+    shipping_address: ShippingAddress
+    billing_address: BillingAddress | SameAsShipping = Field(
+        title="", default_factory=SameAsShipping
+    )
+```
+
+Check out our [guide on how to create human-in-the-loop flows](https://docs.prefect.io/latest/guides/creating-human-in-the-loop-workflows/) to learn more!
+
+### Enhancements
+
+- Update default pause/suspend timeout to 1 hour — https://github.com/PrefectHQ/prefect/pull/11437
+
+### Fixes
+
+- Resolve environment variables during `prefect deploy` — https://github.com/PrefectHQ/prefect/pull/11463
+- Fix prompt and role assignment in `ContainerInstanceProvisioner` — https://github.com/PrefectHQ/prefect/pull/11440
+- Ensure dashboard header is responsive to varying tag and date input sizes — https://github.com/PrefectHQ/prefect/pull/11427
+- Fix error when deploying a remotely loaded flow with options — https://github.com/PrefectHQ/prefect/pull/11484
+
+### Experimental
+
+- Remove title/description from `RunInput` model — https://github.com/PrefectHQ/prefect/pull/11438
+
+### Documentation
+
+- Add guide to optimizing your code for big data — https://github.com/PrefectHQ/prefect/pull/11225
+- Add guide for integrating Prefect with CI/CD via GitHub Actions — https://github.com/PrefectHQ/prefect/pull/11443
+- Expand upon managed execution and provisioned infrastructure push work pool in tutorial — https://github.com/PrefectHQ/prefect/pull/11444
+- Revise Quickstart to include benefits, remote execution, and core concepts — https://github.com/PrefectHQ/prefect/pull/11461
+- Add additional detail to human-in-the-loop documentation — https://github.com/PrefectHQ/prefect/pull/11487
+- Rename guide on reading and writing data to and from cloud provider storage - https://github.com/PrefectHQ/prefect/pull/11441
+- Update formatting and work pool docs — https://github.com/PrefectHQ/prefect/pull/11479
+- Add documentation for `wait_for_input` — https://github.com/PrefectHQ/prefect/pull/11404
+- Fix typo in documentation on `prefect deploy` — https://github.com/PrefectHQ/prefect/pull/11488
+- Add troubleshooting instructions for agents — 
https://github.com/PrefectHQ/prefect/pull/11475 +- Update README example and language - https://github.com/PrefectHQ/prefect/pull/11171 +- Fix workers graph rendering — https://github.com/PrefectHQ/prefect/pull/11455 + +### Contributors + +- @1beb made their first contribution in https://github.com/PrefectHQ/prefect/pull/11475 +- @KMDgit made their first contribution in https://github.com/PrefectHQ/prefect/pull/11488 + +**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.11...2.14.12 + +## Release 2.14.11 + +### Customize resource names when provisioning infrastructure for push work pools + +In the past few releases, we've added the ability to provision infrastructure for push work pools via the CLI. This release adds the ability to customize the name of the resources created in your cloud environment when provisioning infrastructure for push work pools so you can follow your organization's naming conventions. + +To customize your resource names when provisioning infrastructure for a push work pool, follow the interactive prompts: + +```bash +? Proceed with infrastructure provisioning with default resource names? [Use arrows to move; enter to select] +┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ ┃ Options: ┃ +┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ │ Yes, proceed with infrastructure provisioning with default resource names │ +│ > │ Customize resource names │ +│ │ Do not proceed with infrastructure provisioning │ +└────┴───────────────────────────────────────────────────────────────────────────┘ +? Please enter a name for the resource group (prefect-aci-push-pool-rg): new-rg +? Please enter a name for the app registration (prefect-aci-push-pool-app): new-app +? Please enter a prefix for the Azure Container Registry (prefect): newregistry +? Please enter a name for the identity (used for ACR access) (prefect-acr-identity): new-identity +? Please enter a name for the ACI credentials block (new-work-pool-push-pool-credentials): new-aci-block +╭───────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool new-work-pool will require: │ +│ │ +│ Updates in subscription: Azure subscription 1 │ +│ │ +│ - Create a resource group in location: eastus │ +│ - Create an app registration in Azure AD: new-app │ +│ - Create/use a service principal for app registration │ +│ - Generate a secret for app registration │ +│ - Create an Azure Container Registry with prefix newregistry │ +│ - Create an identity new-identity to allow access to the created registry │ +│ - Assign Contributor role to service account │ +│ - Create an ACR registry for image hosting │ +│ - Create an identity for Azure Container Instance to allow access to the registry │ +│ │ +│ Updates in Prefect workspace │ +│ │ +│ - Create Azure Container Instance credentials block: new-aci-block │ +│ │ +╰───────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? 
[y/n]: y
+Creating resource group
+Resource group 'new-rg' created successfully
+Creating app registration
+App registration 'new-app' created successfully
+Generating secret for app registration
+Secret generated for app registration with client ID '03923189-3151-4acd-8d59-76483752cd39'
+Creating ACI credentials block
+ACI credentials block 'new-aci-block' created in Prefect Cloud
+Assigning Contributor role to service account
+Service principal created for app ID '25329389-3151-4acd-8d59-71835252cd39'
+Contributor role assigned to service principal with object ID '483h4c85-4a8f-4fdb-0394-bd0f0b1202d0'
+Creating Azure Container Registry
+Registry created
+Logged into registry newregistry1702538242q2z2.azurecr.io
+Creating identity
+Identity 'new-identity' created
+Provisioning infrastructure. ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
+Your default Docker build namespace has been set to 'newregistry1702538242q2z2.azurecr.io'.
+Use any image name to build and push to this registry by default:
+
+╭─────────────────────────── example_deploy_script.py ───────────────────────────╮
+│ from prefect import flow                                                        │
+│ from prefect.deployments import DeploymentImage                                 │
+│                                                                                 │
+│                                                                                 │
+│ @flow(log_prints=True)                                                          │
+│ def my_flow(name: str = "world"):                                               │
+│     print(f"Hello {name}! I'm a flow running on an Azure Container Instance!")  │
+│                                                                                 │
+│                                                                                 │
+│ if __name__ == "__main__":                                                      │
+│     my_flow.deploy(                                                             │
+│         name="my-deployment",                                                   │
+│         work_pool_name="my-work-pool",                                          │
+│         image=DeploymentImage(                                                  │
+│             name="my-image:latest",                                             │
+│             platform="linux/amd64",                                             │
+│         )                                                                       │
+│     )                                                                           │
+╰─────────────────────────────────────────────────────────────────────────────────╯
+Infrastructure successfully provisioned for 'new-work-pool' work pool!
+Created work pool 'new-work-pool'!
+```
+
+Using a push work pool with automatic infrastructure provisioning is a great way to get started with a production-level Prefect setup in minutes! Check out our [push work pool guide](https://docs.prefect.io/latest/guides/deployment/push-work-pools/) for step-by-step instructions on how to get started!
+
+See the following pull requests for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11407
+- https://github.com/PrefectHQ/prefect/pull/11381
+- https://github.com/PrefectHQ/prefect/pull/11412
+
+### An updated date and time input on the workspace dashboard
+
+We've added a new date and time filter to the workspace dashboard that gives you greater control over the time range displayed. You can now filter by days, hours, and even minutes, or specify an exact date and time range. You can also step backward and forward in time using the selected window, for example, scrolling through runs hour by hour.
+
+See it in action!
+[![Demo of updated time input in the Prefect UI](https://github.com/PrefectHQ/prefect/assets/40272060/045b144f-35ff-4b32-abcd-74eaf16f181c)
+](https://www.loom.com/share/ca099d3792d146d08df6fcd506ff9eb2?sid=70797dda-6dc6-4fe6-bf4a-a9df2a0bf230)
+
+See the following pull requests for implementation details:
+
+- https://github.com/PrefectHQ/prefect-ui-library/pull/1937
+- https://github.com/PrefectHQ/prefect-design/pull/1048
+
+### Enhancements
+
+- Add the ability to publish `KubernetesJob` blocks as work pools — https://github.com/PrefectHQ/prefect/pull/11347
+- Add setting to configure a default Docker namespace for image builds — https://github.com/PrefectHQ/prefect/pull/11378
+- Add the ability to provision an ECR repository for ECS push work pools — https://github.com/PrefectHQ/prefect/pull/11382
+- Add ability to provision an Artifact Registry repository for Cloud Run push work pools — https://github.com/PrefectHQ/prefect/pull/11399
+- Add ability to provision an Azure Container Registry for Azure Container Instance push work pools — https://github.com/PrefectHQ/prefect/pull/11387
+- Add support for `is_schedule_active` to `flow.deploy` and `flow.serve` — https://github.com/PrefectHQ/prefect/pull/11375
+- Allow users to select relative and fixed date ranges to filter the dashboard — https://github.com/PrefectHQ/prefect/pull/11406
+- Add support for arbitrary sink types to `prefect.utilities.processutils.stream_text` — https://github.com/PrefectHQ/prefect/pull/11298
+- Update the Prefect UI deployments page to add run activity and separate out the deployment and flow names — https://github.com/PrefectHQ/prefect/pull/11394
+- Update Prefect UI workspace dashboard filters to use new date range - https://github.com/PrefectHQ/prefect-ui-library/pull/1937
+
+### Fixes
+
+- Fix bug where a pause state reused an existing state ID — https://github.com/PrefectHQ/prefect/pull/11405
+
+### Experimental
+
+- Build out API for creating/reading/deleting flow run inputs — https://github.com/PrefectHQ/prefect/pull/11363
+- Integrate flow run input and schema/response mechanics into pause/suspend — https://github.com/PrefectHQ/prefect/pull/11376
+- Add typing overloads for pause/suspend methods — https://github.com/PrefectHQ/prefect/pull/11403
+- Use bytes for `value` in `create_flow_run_input` — https://github.com/PrefectHQ/prefect/pull/11421
+- Validate run input when resuming flow runs — https://github.com/PrefectHQ/prefect/pull/11396
+- Run existing deployments via the `Runner` webserver — https://github.com/PrefectHQ/prefect/pull/11333
+
+### Documentation
+
+- Add instructions for automatic infrastructure provisioning to the push work pools guide — https://github.com/PrefectHQ/prefect/pull/11316
+- Fix broken links in states concept doc and daemonize guide — https://github.com/PrefectHQ/prefect/pull/11374
+- Update agent upgrade guide to include `flow.deploy` and examples — https://github.com/PrefectHQ/prefect/pull/11373
+- Update block document names in Moving Data guide — https://github.com/PrefectHQ/prefect/pull/11386
+- Rename `Guides` to `How-to Guides` — https://github.com/PrefectHQ/prefect/pull/11388
+- Add guide to provision infrastructure for existing push work pools — https://github.com/PrefectHQ/prefect/pull/11365
+- Add documentation for required permissions for infrastructure provisioning — https://github.com/PrefectHQ/prefect/pull/11417
+- Add docs for managed execution open beta — https://github.com/PrefectHQ/prefect/pull/11397, https://github.com/PrefectHQ/prefect/pull/11426, and https://github.com/PrefectHQ/prefect/pull/11425
+
+### Contributors
+
+- @j-tr
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.10...2.14.11
+
+## Release 2.14.10
+
+### Azure Container Instance push pool infrastructure provisioning via the CLI
+
+We're introducing an enhancement to the Azure Container Instance push pool experience. You can now conveniently provision the necessary Azure infrastructure with the `--provision-infra` flag during work pool creation. This automates the provisioning of the Azure resources essential for ACI push pools, including resource groups, app registrations, service accounts, and more.
+
+To provision Azure resources when creating an ACI push pool:
+
+```bash
+❯ prefect work-pool create my-work-pool --provision-infra --type azure-container-instance:push
+? Please select which Azure subscription to use: [Use arrows to move; enter to select]
+┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃    ┃ Name                 ┃ Subscription ID                      ┃
+┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│    │ Engineering          │ 123                                  │
+│ >  │ Azure subscription 1 │ 234                                  │
+└────┴──────────────────────┴──────────────────────────────────────┘
+╭─────────────────────────────────────────────────────────────────────────────────────────╮
+│ Provisioning infrastructure for your work pool my-work-pool will require:                │
+│                                                                                          │
+│ Updates in subscription Azure subscription 1                                             │
+│                                                                                          │
+│     - Create a resource group in location eastus                                         │
+│     - Create an app registration in Azure AD                                             │
+│     - Create a service principal for app registration                                    │
+│     - Generate a secret for app registration                                             │
+│     - Assign Contributor role to service account                                         │
+│     - Create Azure Container Instance                                                    │
+│                                                                                          │
+│ Updates in Prefect workspace                                                             │
+│                                                                                          │
+│     - Create Azure Container Instance credentials block aci-push-pool-credentials        │
+│                                                                                          │
+╰─────────────────────────────────────────────────────────────────────────────────────────╯
+Proceed with infrastructure provisioning? [y/n]: y
+Creating resource group
+Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━   0% -:--:--Resource group 'prefect-aci-push-pool-rg' created in location 'eastus'
+Creating app registration
+Provisioning infrastructure... ━━━━━━━━╺━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━  20% -:--:--App registration 'prefect-aci-push-pool-app' created successfully
+Generating secret for app registration
+Provisioning infrastructure... ━━━━━━━━━━━━━━━━╺━━━━━━━━━━━━━━━━━━━━━━━  40% 0:00:06Secret generated for app registration with client ID 'abc'
+ACI credentials block 'aci-push-pool-credentials' created
+Assigning Contributor role to service account...
+Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━╺━━━━━━━━━━━━━━━  60% 0:00:06Contributor role assigned to service principal with object ID 'xyz'
+Creating Azure Container Instance
+Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╺━━━━━━━  80% 0:00:04Container instance 'prefect-acipool-container' created successfully
+Creating Azure Container Instance credentials block
+Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
+Infrastructure successfully provisioned for 'my-work-pool' work pool!
+Created work pool 'my-work-pool'!
+```
+
+This marks a step forward in Prefect's Azure capabilities, offering you an efficient and streamlined process for leveraging Azure Container Instances to execute your workflows.
+
+See the following pull request for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11275
+
+### Introducing the `provision-infra` sub-command for enhanced push work pool management
+
+This enhancement allows you to directly provision infrastructure for existing push work pools. Rather than recreating a work pool, you can provision the necessary infrastructure and
+update the existing work pool's base job template with the following command:
+
+```bash
+❯ prefect work-pool provision-infra my-work-pool
+? Please select which Azure subscription to use: [Use arrows to move; enter to select]
+┏━━━━┳━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃    ┃ Name                 ┃ Subscription ID                      ┃
+┡━━━━╇━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│    │ Engineering          │ 13d                                  │
+│ >  │ Azure subscription 1 │ 6h4                                  │
+└────┴──────────────────────┴──────────────────────────────────────┘
+╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ Provisioning infrastructure for your work pool my-work-pool will require:                                       │
+│                                                                                                                 │
+│ Updates in subscription Azure subscription 1                                                                    │
+│                                                                                                                 │
+│     - Create a resource group in location eastus                                                                │
+│     - Create an app registration in Azure AD prefect-aci-push-pool-app                                          │
+│     - Create/use a service principal for app registration                                                       │
+│     - Generate a secret for app registration                                                                    │
+│     - Assign Contributor role to service account                                                                │
+│     - Create Azure Container Instance 'aci-push-pool-container' in resource group prefect-aci-push-pool-rg      │
+│                                                                                                                 │
+│ Updates in Prefect workspace                                                                                    │
+│                                                                                                                 │
+│     - Create Azure Container Instance credentials block aci-push-pool-credentials                               │
+│                                                                                                                 │
+╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
+Proceed with infrastructure provisioning? [y/n]: y
+...
+```
+
+This command bolsters support for efficient work pool management across diverse cloud environments, delivering a tool for seamless infrastructure setup.
+
+See the following pull requests for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11341
+- https://github.com/PrefectHQ/prefect/pull/11355
+
+### Enhancements
+
+- Add a `suspend_flow_run` method to suspend a flow run — https://github.com/PrefectHQ/prefect/pull/11291
+- Limit the displayed work pool types when `--provision-infra` is used to only show supported work pool types - https://github.com/PrefectHQ/prefect/pull/11350
+- Add the ability to publish `Infrastructure` blocks as work pools — https://github.com/PrefectHQ/prefect/pull/11180
+- Add the ability to publish `Process` blocks as work pools — https://github.com/PrefectHQ/prefect/pull/11346
+- Add a Prefect Cloud event stream subscriber — https://github.com/PrefectHQ/prefect/pull/11332
+- Enable storage of key/value information associated with a flow run — https://github.com/PrefectHQ/prefect/pull/11342
+- Delete flow run inputs when the corresponding flow run is deleted — https://github.com/PrefectHQ/prefect/pull/11352
+
+### Fixes
+
+- Fix the `read_logs` return type to be `List[Log]` — https://github.com/PrefectHQ/prefect/pull/11303
+- Fix an issue causing paused flow runs to become stuck in the `Paused` state — https://github.com/PrefectHQ/prefect/pull/11284
+
+### Documentation
+
+- Combine troubleshooting pages — https://github.com/PrefectHQ/prefect/pull/11288
+- Add Google Cloud Run V2 option to Serverless guide — https://github.com/PrefectHQ/prefect/pull/11304
+- Add `suspend_flow_run` to flows documentation — https://github.com/PrefectHQ/prefect/pull/11300
+- Add `work queues` tag to work pools concept page — https://github.com/PrefectHQ/prefect/pull/11320
+- Add missing Python SDK CLI items to the docs — https://github.com/PrefectHQ/prefect/pull/11289
+- Clarify SCIM + service accounts handling — https://github.com/PrefectHQ/prefect/pull/11343
+- Update the work pool concept document — https://github.com/PrefectHQ/prefect/pull/11331
+
+### Contributors
+
+- @tekumara
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.9...2.14.10
+
+## Release 2.14.9
+
+### Automatic infrastructure provisioning for ECS push work pools
+
+Following the introduction of [automatic project configuration for Cloud Run push pools](https://github.com/PrefectHQ/prefect/blob/main/RELEASE-NOTES.md#automatic-project-configuration-for-cloud-run-push-work-pools) last week, we've added the ability to automatically provision infrastructure in your AWS account and set up your Prefect workspace to support a new ECS push pool!
+
+You can create a new ECS push work pool and provision infrastructure in your AWS account with the following command:
+
+```bash
+prefect work-pool create --type ecs:push --provision-infra my-pool
+```
+
+Using the `--provision-infra` flag will automatically set up your default AWS account to be ready to execute flows via ECS tasks:
+
+```
+╭───────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ Provisioning infrastructure for your work pool my-work-pool will require:                              │
+│                                                                                                        │
+│     - Creating an IAM user for managing ECS tasks: prefect-ecs-user                                    │
+│     - Creating and attaching an IAM policy for managing ECS tasks: prefect-ecs-policy                  │
+│     - Storing generated AWS credentials in a block                                                     │
+│     - Creating an ECS cluster for running Prefect flows: prefect-ecs-cluster                           │
+│     - Creating a VPC with CIDR 172.31.0.0/16 for running ECS tasks: prefect-ecs-vpc                    │
+╰───────────────────────────────────────────────────────────────────────────────────────────────────────╯
+Proceed with infrastructure provisioning? [y/n]: y
+Provisioning IAM user
+Creating IAM policy
+Generating AWS credentials
+Creating AWS credentials block
+Provisioning ECS cluster
+Provisioning VPC
+Creating internet gateway
+Setting up subnets
+Setting up security group
+Provisioning Infrastructure ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
+Infrastructure successfully provisioned!
+Created work pool 'my-pool'!
+```
+
+If you have yet to try using an ECS push pool, now is a great time!
+
+If you use Azure, don't fret; we will add support for Azure Container Instances push work pools in a future release!
+
+See the following pull request for implementation details:
+
+- https://github.com/PrefectHQ/prefect/pull/11267
+
+### Enhancements
+
+- Make flows list on Flows page in the Prefect UI a scannable table — https://github.com/PrefectHQ/prefect/pull/11274
+
+### Fixes
+
+- Fix `.serve` crashes due to process limiter — https://github.com/PrefectHQ/prefect/pull/11264
+- Fix URL formatting in `GitRepository` when using provider-specific git credentials blocks — https://github.com/PrefectHQ/prefect/pull/11282
+- Prevent excessively escaping the Windows executable — https://github.com/PrefectHQ/prefect/pull/11253
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.8...2.14.9
+
+## Release 2.14.8
+
+This release is a follow-up to 2.14.7, which never made it to PyPI because of an issue with our GitHub workflow.
+
+### Documentation
+
+- Fix broken docs link in serverless worker documentation — https://github.com/PrefectHQ/prefect/pull/11269
+
+**All changes**: https://github.com/PrefectHQ/prefect/compare/2.14.7...2.14.8
+
+## Release 2.14.7
+
+This release fixes a bug introduced in 2.14.6 where deployments with default Docker image builds looked for images tagged `v2.14.6` instead of `2.14.6`. Users of `2.14.6` should upgrade if they plan to create deployments that use the default image rather than a custom one.
+
+### Enhancements
+
+- Use a new route to read work pool types when connected to Prefect Cloud —
+- Add `parent_flow_run_id` as a new API filter for flow runs —
+
+### Fixes
+
+- Allow more than one dependency package in the requirements of a push or pull step —
+
+### Documentation
+
+- Add serverless work pool landing page —
+- Update Azure Container Instance guide to reflect current Azure Portal interface and Prefect UI —
+- Update imports in **Flows** concept page example —
+
+### New Contributors
+
+- @oakbramble made their first contribution in
+
+**All changes**:
+
+## Release 2.14.6
+
+### View the next run for a deployment at a glance
+
+You can now see the next run for a deployment in the Runs tab of the Deployments page in the Prefect UI! Upcoming runs are now located in a dedicated tab, making the most relevant running and completed flow runs more apparent.
+
+Click below to see it in action!
+[![Demo of next run for a deployment](https://github.com/PrefectHQ/prefect/assets/12350579/c6eee55a-c3c3-47bd-b2c1-9eb04139a376)
+](https://github.com/PrefectHQ/prefect/assets/12350579/c1658f50-512a-4cd4-9d36-a523d3cc9ef0)
+
+See the following pull request for implementation details:
+
+-
+
+### Automatic project configuration for Cloud Run push work pools
+
+Push work pools in Prefect Cloud simplify the setup and management of the infrastructure necessary to run your flows, but they still require some setup. With this release, we've enhanced the `prefect work-pool create` CLI command to automatically configure your GCP project and set up your Prefect workspace to use a new Cloud Run push pool immediately.
+
+Note: To take advantage of this feature, you'll need to have the `gcloud` CLI installed and authenticated with your GCP project.
+
+You can create a new Cloud Run push work pool and configure your project with the following command:
+
+```bash
+prefect work-pool create --type cloud-run:push --provision-infra my-pool
+```
+
+Using the `--provision-infra` flag will allow you to select a GCP project to use for your work pool and automatically configure it to be ready to execute flows via Cloud Run:
+
+```
+╭──────────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ Provisioning infrastructure for your work pool my-pool will require:                                  │
+│                                                                                                       │
+│ Updates in GCP project central-kit-405415 in region us-central1                                       │
+│                                                                                                       │
+│     - Activate the Cloud Run API for your project                                                     │
+│     - Create a service account for managing Cloud Run jobs: prefect-cloud-run                         │
+│         - Service account will be granted the following roles:                                        │
+│             - Service Account User                                                                    │
+│             - Cloud Run Developer                                                                     │
+│     - Create a key for service account prefect-cloud-run                                              │
+│                                                                                                       │
+│ Updates in Prefect workspace                                                                          │
+│                                                                                                       │
+│     - Create GCP credentials block my--pool-push-pool-credentials to store the service account key    │
+│                                                                                                       │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────────╯
+Proceed with infrastructure provisioning? [y/n]: y
+Activating Cloud Run API
+Creating service account
+Assigning roles to service account
+Creating service account key
+Creating GCP credentials block
+Provisioning Infrastructure ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00
+Infrastructure successfully provisioned!
+Created work pool 'my-pool'!
+```
+
+If you have yet to try using a Cloud Run push pool, now is a great time!
+
+If you use another cloud provider, don't fret; we will add support for ECS and Azure Container Instances push work pools in future releases!
+
+See the following pull request for implementation details:
+—
+
+### Enhancements
+
+- Add ability to search for block documents by name in the Prefect UI and API —
+- Add pagination to the Blocks page in the Prefect UI for viewing/filtering more than 200 blocks —
+- Include concurrency controls in `prefect-client` —
+
+### Fixes
+
+- Fix SQLite migration to work with older SQLite versions —
+- Fix Subflow Runs tab filters and persist to URL in the Flow Runs page of the Prefect UI —
+
+### Documentation
+
+- Improve formatting in deployment guides —
+- Add instructions for turning off the flow run logger to the unit testing guide —
+
+### Contributors
+
+- @ConstantinoSchillebeeckx
+
+**All changes**:
+
+## Release 2.14.5
+
+### Storage block compatibility with `flow.from_source`
+
+You can now use all your existing storage blocks with `flow.from_source`! Using storage blocks with `from_source` is great when you need to synchronize your credentials and configuration for your code storage location with your flow run execution environments. Plus, because block configuration is stored server-side and pulled at execution time, you can update your code storage credentials and configuration without re-deploying your flows!
+
+Here's an example of loading a flow from a private S3 bucket and serving it:
+
+```python
+from prefect import flow
+from prefect_aws import AwsCredentials
+from prefect_aws.s3 import S3Bucket
+
+if __name__ == "__main__":
+    flow.from_source(
+        source=S3Bucket(
+            bucket_name="my-code-storage-bucket",
+            credentials=AwsCredentials(
+                aws_access_key_id="my-access-key-id",
+                aws_secret_access_key="my-secret-access-key",
+            ),
+        ),
+        entrypoint="flows.py:my_flow",
+    ).serve(name="my-deployment")
+```
+
+Here's an example of loading and deploying a flow from an S3 bucket:
+
+```python
+from prefect import flow
+from prefect_aws.s3 import S3Bucket
+
+if __name__ == "__main__":
+    flow.from_source(
+        source=S3Bucket.load("my-code-storage-bucket"), entrypoint="flows.py:my_flow"
+    ).deploy(name="my-deployment", work_pool_name="above-ground")
+```
+
+Note that a storage block must be saved before you can deploy a flow with it; saving isn't required when serving a remotely stored flow.
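+
+For completeness, here's a minimal sketch of saving the `S3Bucket` block that the `.deploy` example above loads; the bucket and credential values are the same illustrative placeholders used above:
+
+```python
+from prefect_aws import AwsCredentials
+from prefect_aws.s3 import S3Bucket
+
+# Save the storage block so it can later be loaded with S3Bucket.load(...)
+# (the bucket name and credentials are illustrative placeholders)
+S3Bucket(
+    bucket_name="my-code-storage-bucket",
+    credentials=AwsCredentials(
+        aws_access_key_id="my-access-key-id",
+        aws_secret_access_key="my-secret-access-key",
+    ),
+).save("my-code-storage-bucket")
+```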
+
+See the following pull request for implementation details:
+
+-
+
+### Enhancements
+
+- Add customizable host and port settings for worker webserver —
+- Safely retrieve `flow_run_id` in `EventsWorker` while finding related events —
+- Add client-side setting for specifying a default work pool —
+- Allow configuration of task run tag concurrency slot delay transition time via setting —
+- Enable enhanced flow run cancellation by default -
+
+### Fixes
+
+- Fix access token retrieval when using `GitRepository` with a private repo and `.deploy` —
+- Fix bug where check for required packages fails incorrectly during `prefect deploy` —
+- Fix routing to the Flows page from a flow run in the Prefect UI —
+- Ensure the Prefect UI Flow Runs page reacts to filter changes -
+- Optimize memory usage by clearing `args/kwargs` in a Prefect `Call` post-execution -
+- Allow logs to handle non-UUID `flow_run_id`s -
+- Only run unit tests for Python file changes —
+- Add `codespell` config and add to pre-commit —
+- Update token regex in release notes generation script for VSCode compatibility -
+
+### Documentation
+
+- Add Terraform Provider guide, update and simplify guides navigation —
+- Clarify and harmonize Prefect Cloud documentation to reflect nomenclature and UX changes —
+- Add information on Prefect Cloud to README —
+- Update work pool-based deployment guide to include `.deploy` —
+- Add GitHub information to auth-related Prefect Cloud documentation —
+- Update workers tutorial —
+- Update mkdocs material pin —
+- Fix typo in audit log documentation —
+- Fix typo in workers tutorial example —
+
+### Contributors
+
+- @yarikoptic made their first contribution in
+- @taljaards
+
+**All changes**:
+
+## Release 2.14.4
+
+### New improved flow run graph with dependency layout
+
+The flow run graph in the Prefect UI has been rebuilt from the ground up, offering significantly improved performance capabilities that allow larger flow runs to be displayed much more smoothly. We’ve added three new layouts: two non-temporal layout options, designed to provide a clearer picture of the dependency paths, and one to facilitate easy comparison of run durations. The x-axis can now be independently scaled for temporal layouts; you can adjust it in the graph settings or with the new keyboard shortcuts `-` and `+`. We also included small bug fixes, including fixes to the display of cached tasks.
+
+*flow run graph sequential grid view*
+
+See the following pull requests for implementation details:
+
+-
+-
+-
+-
+-
+
+### Enhancements
+
+- Add API route for block counts —
+- Improve tag handling on `DeploymentImage` for `.deploy`:
+  -
+  -
+- Allow `image` passed into `.deploy` to be optional if loading flow from storage —
+- Ensure client avoids image builds when deploying to managed work pools —
+- Add `SIGTERM` handling to runner to gracefully handle timeouts —
+- Allow tasks to use `get_run_logger` without a parent flow run —
+- Allow `ResultFactory` creation `from_task` when no `flow_run_context` is available —
+
+### Fixes
+
+- Avoid printing references to workers when deploying to managed pools —
+
+### Documentation
+
+- Fix docstring for `flow.deploy` method example —
+- Add warning about image architecture to push pool guide —
+- Move webhooks guide to `Development` section in guides index —
+
+**All changes**:
+
+## Release 2.14.3
+
+### Observability with deployment status
+
+You can now track the status of your deployments in the Prefect UI, which is especially useful when serving flows, as they have no associated work pool or worker. If you see a flow run enter a `LATE` state (it isn’t running), you can click into the deployment for that flow run and see a red indicator next to your deployment, meaning that the worker, runner, or agent polling that deployment or its associated work queue is offline.
+
+- Deployments created from served flows will have a `READY` status if their associated process is running.
+- Deployments created in a work pool will have a `READY` status when a worker is `ONLINE` and polling the associated work queue.
+- Deployments created in a push work pool (Prefect Cloud) will always have a `READY` status.
+
+*a late flow run for a deployment that is `NOT_READY`*
+
+In Prefect Cloud, an event is emitted each time a deployment changes status. These events are viewable in the Event Feed.
+
+*event feed deployment status events*
+You can also create an automation triggered by deployment status changes on the Automations page!
+
+*deployment status trigger on automations page*
+
+See the following pull requests for implementation details:
+
+-
+-
+-
+-
+
+### Additional storage options for `flow.from_source`
+
+You can now load flows from a variety of storage options with `flow.from_source`! In addition to loading flows from a git repository, you can load flows from any supported `fsspec` protocol.
+
+Here's an example of loading and serving a flow from an S3 bucket:
+
+```python
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="s3://my-bucket/my-folder",
+        entrypoint="flows.py:my_flow",
+    ).serve(name="deployment-from-remote-flow")
+```
+
+You can use the `RemoteStorage` class to provide additional configuration options.
+
+Here's an example of loading and serving a flow from Azure Blob Storage with a custom account name:
+
+```python
+from prefect import flow
+from prefect.runner.storage import RemoteStorage
+
+if __name__ == "__main__":
+    flow.from_source(
+        source=RemoteStorage(url="az://my-container/my-folder", account_name="my-account-name"),
+        entrypoint="flows.py:my_flow",
+    ).serve(name="deployment-from-remote-flow")
+```
+
+See the following pull request for implementation details:
+
+-
+
+### Enhancements
+
+- Add option to skip building a Docker image with `flow.deploy` —
+- Display placeholder on the variables page when no variables are present —
+- Allow composite sort of block documents by `block_type_name` and name —
+- Add option to configure a warning via `PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD` if task parameter introspection takes a long time —
+
+### Fixes
+
+- Update cancellation cleanup service to allow for infrastructure teardown —
+- Allow `password` to be provided in `credentials` for `GitRepository` —
+- Enable page refresh loading for non-dashboard pages —
+- Allow runner to load remotely stored flows when running hooks —
+- Fix reading of flow run graph with unstarted runs —
+- Allow Pydantic V2 models in flow function signatures —
+- Run `prefect-client` build workflow on reqs.txt updates —
+- Skip unsupported Windows tests —
+- Avoid yanked `pytest-asyncio==0.22.0` —
+
+### Documentation
+
+- Add guide to daemonize a worker or `.serve` process with systemd —
+- Add clarification of term `task` in Global Concurrency docs —
+- Update Global Concurrency guide to highlight general purpose use of concurrency limits —
+- Update push work pools documentation to mention concurrency —
+- Add documentation on Prefect Cloud teams —
+- Update 2.14.2 release notes —
+- Fix rendering of marketing banner on the Prefect dashboard —
+- Fix typo in `README.md` —
+
+### New Contributors
+
+- @vatsalya-vyas made their first contribution in
+
+**All changes**:
+
+## Release 2.14.2
+
+### Ability to pass \*\*kwargs to state change hooks
+
+You can now pass a partial (sometimes called ["curried"](https://www.geeksforgeeks.org/partial-functions-python/)) hook to your tasks and flows, allowing for more tailored post-execution behavior.
+
+```python
+from functools import partial
+from prefect import flow
+
+data = {}
+
+def my_hook(flow, flow_run, state, **kwargs):
+    data.update(state=state, **kwargs)
+
+@flow(on_completion=[partial(my_hook, my_arg="custom_value")])
+def lazy_flow():
+    pass
+
+state = lazy_flow(return_state=True)
+
+assert data == {"my_arg": "custom_value", "state": state}
+```
+
+This can be used in conjunction with the `.with_options` method on tasks and flows to dynamically provide extra kwargs to your hooks, like [this example](https://docs.prefect.io/latest/concepts/states/#pass-kwargs-to-your-hooks) in the docs.
+
+See the following pull request for implementation details:
+
+-
+
+### Fixes
+
+- Move responsibility for running `on_cancellation` and `on_crashed` flow hooks to the runner when present —
+
+**All changes**:
+
+## Release 2.14.1
+
+### Documentation
+
+- Add Python `serve` and `deploy` options to the `schedules` concepts documentation —
+
+### Fixes
+
+- Refine flow parameter validation to use the correct form of validation depending on whether the parameter is a pydantic v1 or v2 model —
+
+**All changes**:
+
+## Release 2.14.0
+
+### Introducing the `prefect-client`
+
+This release provides a new way of running flows using the `prefect-client` package. This slimmed-down version of `prefect` has a small surface area of functionality and is intended for interacting with the Prefect server or Prefect Cloud **only**. You can install `prefect-client` using `pip`:
+
+```bash
+pip install prefect-client
+```
+
+To use it, you will need to configure your environment to interact with a remote Prefect API by setting the `PREFECT_API_URL` and `PREFECT_API_KEY` environment variables. Using it in your code remains the same:
+
+```python
+from prefect import flow, task
+
+@flow(log_prints=True)
+def hello_world():
+    print("Hello from prefect-client!")
+
+hello_world()
+```
+
+See implementation details in the following pull request:
+
+-
+
+### Enhancements
+
+- Add flow name to the label for subflow runs in the Prefect UI —
+
+### Fixes
+
+- Fix ability to pull flows and build deployments in Windows environments -
+- Remove unnecessary work queue health indicator from push pools in the Prefect UI dashboard -
+- Rename mismatched alembic file —
+
+### Documentation
+
+- Standardize heading capitalization in guide to developing a new worker type —
+- Update Docker guide to mention image builds with `prefect.yaml` and `flow.deploy` —
+- Update Kubernetes guide to mention and link to Python-based flow `deploy` creation method —
+
+### New Contributors
+
+- @m-steinhauer made their first contribution in
+- @maitlandmarshall made their first contribution in
+
+**All changes**:
+
+## Release 2.13.8
+
+### Introducing `flow.deploy`
+
+When we released `flow.serve`, we introduced a radically simple way to deploy flows. Serving flows is perfect for many use cases, but the need for persistent infrastructure means serving flows may not work well for flows that require expensive or limited infrastructure.
+
+We're excited to introduce `flow.deploy` as a simple transition from running your served flows on persistent infrastructure to executing your flows on dynamically provisioned infrastructure via work pools and workers. `flow.deploy` ensures your flows execute consistently across environments by packaging your flow into a Docker image and making that image available to your workers when executing your flow.
+
+Updating your serve script to a deploy script is as simple as changing `serve` to `deploy`, providing a work pool to deploy to, and providing a name for the built image.
+
+Here's an example of a serve script:
+
+```python
+from prefect import flow
+
+
+@flow(log_prints=True)
+def hello_world(name: str = "world", goodbye: bool = False):
+    print(f"Hello {name} from Prefect! 🤗")
+
+    if goodbye:
+        print(f"Goodbye {name}!")
+
+
+if __name__ == "__main__":
+    hello_world.serve(
+        name="my-first-deployment",
+        tags=["onboarding"],
+        parameters={"goodbye": True},
+        interval=60,
+    )
+```
+
+transitioned to a deploy script:
+
+```python
+from prefect import flow
+
+
+@flow(log_prints=True)
+def hello_world(name: str = "world", goodbye: bool = False):
+    print(f"Hello {name} from Prefect! 🤗")
+
+    if goodbye:
+        print(f"Goodbye {name}!")
+
+
+if __name__ == "__main__":
+    hello_world.deploy(
+        name="my-first-deployment",
+        tags=["onboarding"],
+        parameters={"goodbye": True},
+        interval=60,
+        work_pool_name="above-ground",
+        image='my_registry/hello_world:demo'
+    )
+```
+
+You can also use `deploy` as a replacement for `serve` if you want to deploy multiple flows at once.
+
+For more information, check out our tutorial's newly updated [Worker & Work Pools](https://docs.prefect.io/latest/tutorial/workers/) section!
+
+See implementation details in the following pull requests:
+
+-
+-
+-
+
+### Enhancements
+
+- Add `last_polled` column to deployment table —
+- Add `status` and `last_polled` to deployment API responses —
+- Add flow run graph v2 endpoint tuned for UI applications —
+- Add ability to convert `GitRepository` into `git_clone` deployment step —
+- Update `/deployments/get_scheduled_flow_runs` endpoint to update deployment status —
+
+### Fixes
+
+- Clarify CLI prompt message for missing integration library for worker —
+- Rename `ruamel-yaml` to `ruamel.yaml` in `requirements.txt` —
+- Clarify work pool banner on Work Pool page UI —
+
+### Documentation
+
+- Clean up `Using the Prefect Orchestration Client` guide —
+- Add link to Coiled's documentation for hosting served flows —
+- Clarify that access control lists do not affect related objects —
+- Improve block-based deployment concept page metadata and admonitions —
+- Update docs to prioritize workers over agents —
+- Update work pools and workers tutorial to use `flow.deploy` —
+- Move Docker image discussion to Docker guide —
+
+### Contributors
+
+- @lpequignot made their first contribution in
+
+**All changes**:
+
+## Release 2.13.7
+
+### Enabling Pydantic V2
+
+In 2.13.5 we released experimental support for Pydantic V2, which allowed it to be co-installed via a forced install. In this release, we are enabling co-installation by default, which allows you to leverage Pydantic V2 in your flows and tasks. Additionally, you can choose to update to Pydantic V2 on your own timeline, as we maintain compatibility with V1 within flows and tasks.
+
+See implementation details in the following pull request:
+
+-
+
+### Documentation
+
+- Fix typo in release notes -
+
+### Contributors
+
+- @taljaards
+
+**All changes**:
+
+## Release 2.13.6
+
+### Specify a default result storage block as a setting
+
+Previously, specifying result storage blocks necessitated changes in the `@flow` or `@task` decorator. Now, the `PREFECT_DEFAULT_RESULT_STORAGE_BLOCK` setting allows users to set a default storage block on a work pool or via job variables for a deployment.
+For example, to set a default storage block for a deployment via `prefect.yaml`:
+
+```yaml
+deployments:
+  - name: my-super-cool-deployment
+    entrypoint: some_directory/some_file.py:my_flow
+    schedule:
+      cron: "0 20 * * 1-5"
+    work_pool:
+      name: ecs-pool
+      job_variables:
+        env:
+          PREFECT_DEFAULT_RESULT_STORAGE_BLOCK: s3/my-s3-bucket-block-name
+```
+
+This enhancement makes it easier to swap result storage by updating the environment in the UI or in your `prefect.yaml`, eliminating the need to alter your flow source code.
+
+See the following pull request for details:
+
+-
+
+### Experimental support for enhanced cancellation
+
+We're introducing a new experimental feature that will enable more consistent and reliable cancellation of flow runs.
+
+To enable enhanced cancellation, set the `PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION` setting on your worker or agents to `True`:
+
+```bash
+prefect config set PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION=True
+```
+
+When enabled, you can cancel flow runs in situations where cancellation could previously fail, such as when your worker is offline. We will continue to develop enhanced cancellation to improve its reliability and performance. If you encounter any issues, please let us know in Slack or with a GitHub issue.
+
+Note: If you are using the Kubernetes worker, you will need to update your `prefect-kubernetes` installation to `0.3.1`. If you are using the Cloud Run or Vertex AI workers, you will need to update your `prefect-gcp` installation to `0.5.1`.
+
+See the following pull requests for details:
+
+-
+-
+
+### Enhancements
+
+- Add link to Prefect Cloud information in the Prefect UI —
+
+### Fixes
+
+- Avoid `prefect deploy` prompt for remote storage if a global pull step is already defined -
+
+### Documentation
+
+- Add a guide for using the Prefect client —
+- Remove icons from side navigation for improved readability —
+- Update deployments tutorial for consistent styling —
+- Fix typo in CLI command in deployments tutorial —
+- Fix typo in logging guide —
+- Update documentation styling —
+
+### Contributors
+
+- @Sun-of-a-beach made their first contribution in
+- @manaw
+
+**All changes**:
+
+## Release 2.13.5
+
+### Load and serve remotely stored flows
+
+You can now load and serve flows from a git repository!
+
+With the new `flow.from_source` method, you can specify a git repository and a path to a flow file in that repository. This method will return a flow object that can be run or served with `flow.serve()`.
+
+Here's an example of loading a flow from a git repository and serving it:
+
+```python
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/org/repo.git",
+        entrypoint="path/to/flow.py:my_flow",
+    ).serve(name="deployment-from-remote-flow")
+```
+
+When you load and serve a flow from a git repository, the serving process will periodically poll the repository for changes. This means that you can update the flow in the repository and the changes will be reflected in the served flow without restarting the serve script!
+
+To learn more about loading and serving flows from a git repository, check out [the docs](https://docs.prefect.io/latest/concepts/flows/#retrieve-a-flow-from-remote-storage)!
+
+See the following pull requests for details:
+
+-
+-
+
+### Experimental Pydantic 2 Compatibility
+
+We're working eagerly toward having `prefect` installable with either `pydantic<2` or `pydantic>2`.
+As a first step toward compatibility, we've ensured that Prefect's use of `pydantic` is isolated from _your_ use of `pydantic` in as many ways as possible. As of this release, `prefect` still has a stated `pydantic` requirement of `<2`, but we are testing against `pydantic>2` in our continuous integration tests. If you're feeling adventurous, feel free to manually install `pydantic>2` and run some flows with it. If you do, please let us know how it's going with a note in Slack or with a GitHub issue.
+
+See the following pull requests for details:
+
+-
+-
+-
+-
+-
+-
+-
+
+### Enhancements
+
+- Use flow run context for default values in task run logger —
+- Default `PREFECT_UI_API_URL` to relative path `/api` —
+- Add blob storage options to `prefect deploy` —
+- Add retries on responses with a 408 status code —
+
+### Fixes
+
+- Ensure agents only query work queues in `default-agent-pool` work pool if no pool is specified —
+- Update `Runner` to correctly handle spaces in Python executable path —
+- Update `PREFECT__FLOW_RUN_ID` environment variable to dash-delimited UUID format —
+- Fix bug preventing importing `prefect` in a thread —
+
+### Documentation
+
+- Add GCP Vertex AI worker to worker types list in work pools documentation —
+- Expound upon rate limit info and global concurrency use cases in concurrency guide —
+- Point docker guide link to tutorial on workers —
+- Clarify workers and work pools as an alternative to `.serve()` in tutorials —
+- Fix typo in deployments concept page —
+- Remove beta label from push work pool documentation —
+
+### Contributors
+
+- @alexmojaki made their first contribution in
+
+**All changes**:
+
+## Release 2.13.4
+
+### Enhancements
+
+- Lift API and database constraints that require task runs to have an associated flow run id —
+
+### Fixes
+
+- Fix an issue with infinite scrolling on the sub flow runs tab in the UI -
+
+### Documentation
+
+- Add dark mode base job template screenshot to work pools documentation —
+- Drop beta tag from push work pools documentation —
+- Improve logo sizing and general housekeeping -
+
+## Release 2.13.3
+
+### Allow configuration of a work pool's base job template via the CLI
+
+Previously, the creation and modification of work pools, including editing the base job template, were done through the Prefect UI. Now you can alter the base job template through CLI commands.
+
+Retrieve the default base job template for a given work pool type:
+
+```bash
+prefect work-pool get-default-base-job-template --type kubernetes
+```
+
+You can customize the base job template by passing a JSON file to the `--base-job-template` flag:
+
+```bash
+prefect work-pool create my-k8s-pool --type kubernetes --base-job-template ./path/template.json
+```
+
+You can also update a work pool's base job template via the CLI, which is useful for version control:
+
+```bash
+prefect work-pool update my-work-pool --base-job-template base-job-template.json --description "My work pool" --concurrency-limit 10
+```
+
+See the documentation on [work pools](https://docs.prefect.io/latest/concepts/work-pools/) for more information, or see the following pull requests for implementation details:
+
+-
+-
+-
+-
+-
+
+### Allow users to customize their default flow runs view in the Prefect UI
+
+You can now set your own default filter view on your Flow Runs page! You must first save and name a view before you can set it as your default. This setting is only stored locally, so it will not be shared across machines/browsers.
+
+Note: The previous default view ("Default view") has been renamed to "Past week".
+
+### New Google Vertex AI work pool and worker
+
+- Run flows in containers on Google Vertex AI.
+- Requires a Google Cloud Platform account and the `prefect-gcp` library installed. Read more [here](https://prefecthq.github.io/prefect-gcp/vertex_worker/).
+
+### Enhancements
+
+- Display `pull_steps` on Deployments page in the Prefect UI —
+- Add `/deployments/get_scheduled_flow_runs` endpoint for retrieving scheduled flow runs from deployments —
+- Add flow run filter for fetching the first-level subflows for a given flow —
+
+### Fixes
+
+- Raise `RuntimeError` if `pip_install_requirements` step fails —
+- Use a fixed list of known collection registry views -
+
+### Documentation
+
+- Fix typos in documentation and codebase —
+- Fix example in tasks concept documentation —
+- Update `git_clone` deployment step example in documentation —
+- Add `prefect deploy` guide to guide index for visibility —
+- Fix warning in deployment storage guide documentation —
+
+### Contributors
+
+- @arthurgtllr made their first contribution in
+
+- @mj0nez
+
+**All changes**:
+
+## Release 2.13.2
+
+### Opt-in server-side enforcement of deployment parameter schemas
+
+We've added the ability to enforce parameter schemas for deployments via the Prefect API! This feature will prevent creation of flow runs with parameters that are incompatible with deployed flows, allowing you to discover errors sooner and avoid provisioning infrastructure for flow runs destined to fail.
+
+Use `enforce_parameter_schema` when deploying your flow to guard against invalid parameters:
+
+```python
+from prefect import flow
+from pydantic import BaseModel
+
+
+class Person(BaseModel):
+    name: str
+    greeting: str = "Hello"
+
+
+@flow(log_prints=True)
+def my_flow(person: Person, name: str = "world"):
+    print(f'{person.name} says, "{person.greeting}, {name}!"')
+
+
+if __name__ == "__main__":
+    my_flow.serve(
+        "testing-params",
+        enforce_parameter_schema=True,
+    )
+```
+
+An attempt to run the created deployment with invalid parameters will fail, with a reason why the flow run cannot be created:
+
+```bash
+> prefect deployment run 'my-flow/testing-params' -p person='{"name": 1}'
+
+Error creating flow run: Validation failed for field 'person.name'. Failure reason: 1 is not of type 'string'
+```
+
+You can enable parameter enforcement via `prefect deploy` with the `--enforce-parameter-schema` flag or by setting `enforce_parameter_schema` to `True` in your `prefect.yaml` file.
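+
+For reference, here is a minimal sketch of what that `prefect.yaml` option can look like (the deployment name and entrypoint below are illustrative, not from this release):
+
+```yaml
+deployments:
+  - name: testing-params                    # illustrative deployment name
+    entrypoint: flows/my_flow.py:my_flow    # illustrative entrypoint
+    enforce_parameter_schema: true          # reject runs with schema-invalid parameters
+```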
+
+See the following pull request for details:
+
+-
+
+### Enhanced deployment flexibility with pattern-based deploying
+
+This enhancement enables you to deploy flows using a variety of name patterns, making deployment management more flexible and dynamic:
+
+**Deploy all deployments for a specific flow:**
+
+```bash
+prefect deploy -n flow-a/*
+```
+
+**Deploy all deployments with a specific deployment name:**
+
+```bash
+prefect deploy -n */prod
+```
+
+Note: This was previously possible in non-interactive mode with `prefect --no-prompt deploy -n prod`.
+
+**Deploy all deployments containing a specified string in the flow name:**
+
+```bash
+prefect deploy -n *extract*/*
+```
+
+**Deploy deployments with a mix of pattern matching styles:**
+
+```bash
+prefect deploy -n flow-a/* -n */prod
+```
+
+**Deploy deployments with a mix of pattern matching and without:**
+
+```bash
+prefect deploy -n flow-a/* -n flow-b/default
+```
+
+See the following pull request for details:
+
+-
+
+### Enhancements
+
+- Add API route for work pool counts —
+- Add CLI command to get default base job template —
+
+### Fixes
+
+- Make paths relative rather than absolute in the `prefect dev build-ui` command —
+- Lower the upper bound on the pinned `pendulum` library —
+- Fix command handling in `run_shell_script` deployment step on Windows —
+- Fix validation on concurrency limits —
+- Fix Prefect variable resolution in deployments section of `prefect.yaml` —
+
+### Documentation
+
+- Update UI screenshot for role creation —
+- Add `push work pools` tag to push work pools guide to raise visibility —
+- Update docs with recent brand changes —
+- Update Prefect Cloud quickstart guide to include new features —
+- Fix broken diagram in workers tutorial —
+- Add screenshots to artifacts concept page —
+- Remove boost from block-based deployments page in documentation and improve visibility of `prefect deploy` —
+- Add example of retrieving default base job template to work pools concept documentation —
+- Add references to `enforce_parameter_schema` to docs —
+- Add documentation for pattern matching in `prefect deploy` —
+
+### New Contributors
+
+- @danielhstahl made their first contribution in
+
+- @morremeyer made their first contribution in
+- @NikoRaisanen made their first contribution in
+
+**All changes**:
+
+## Release 2.13.1
+
+### Hide subflow runs in the Prefect UI
+
+We've added the ability to filter out subflow runs from the list on the Flow Runs page! This feature is especially beneficial for those who frequently use subflows, making it easier to focus on parent flows with less clutter.
+
+![Hide subflows in UI demo](https://github.com/PrefectHQ/prefect/assets/31014960/7f6a9473-8003-4a90-8ff7-4d766623b38b)
+
+See the following for implementation details:
+
+-
+
+### Enhancements
+
+- Add `run_count` to `prefect.runtime.flow_run` —
+- Add `run_count` to `prefect.runtime.task_run` —
+- Allow passing deployment triggers via CLI with `prefect deploy` —
+- Add `is_null` filter for deployments to `/flows/filter` endpoint —
+- Show associated flow name on Custom Run page in the Prefect UI -
+- Add ability to reset a task-based concurrency limit from the UI -
+- Display error `details` returned by API -
+- Add pagination to Deployments and Flows pages in the Prefect UI -
+- Add opt-in to display large flow run graphs in Prefect UI -
+- Add Prefect logo to UI sidebar and fix dashboard padding —
+- Add ability to update existing deployment configurations with `prefect deploy` —
+
+### Fixes
+
+- Avoid creating unpersisted blocks remotely —
+- Handle DST in `CronSchedules` —
+- Allow Python classes as flow/task type hints —
+- Fix formatting of `SendgridEmail.to_emails` example in notifications API reference —
+- Streamline Artifact search filters to match other pages in the Prefect UI -
+- Improve the mobile navigation in the Prefect UI —
+
+### Documentation
+
+- Add object ACL documentation —
+- Use better arrow icon for `Try Cloud` button —
+- Improve bash output format in code blocks on concepts/agents page —
+- Update concepts screenshots to reflect improved Prefect UI —
+- Update event feed screenshot in concepts pages —
+- Update Prefect Cloud index screenshots and remove Prefect Cloud quickstart —
+- Add error summaries section to Prefect Cloud index —
+- Clarify supported artifact types —
+- Update Prefect Cloud pages screenshots —
+- Fix broken links in events concept docs and variables guide —
+
+### New Contributors
+
+- @odoublewen made their first contribution in
+
+**All changes**:
+
+## Release 2.13.0
+
+### Introducing global concurrency limits
+
+Control task execution and system stability with Prefect's new global concurrency and rate limits.
+
+- **Concurrency Limits:** Manage task execution efficiently, controlling how many tasks can run simultaneously. Ideal for optimizing resource usage and customizing task execution.
+
+- **Rate Limits:** Ensure system stability by governing the frequency of requests or operations. Perfect for preventing overuse, ensuring fairness, and handling errors gracefully.
+
+Choose concurrency limits for resource optimization and task management, and opt for rate limits to maintain system stability and fair access to services. To begin using global concurrency limits, check out our [guide](https://docs.prefect.io/guides/global-concurrency-limits/).
+
+See the following pull request for details:
+
+-
+
+### Introducing work pool and worker status
+
+Work pools and workers are critical components of Prefect's distributed execution model. To help you monitor and manage your work pools and workers, we've added status indicators to the Prefect UI.
+
+Work pools can now have one of three statuses:
+
+- `Ready` - at least one online worker is polling the work pool and the work pool is ready to accept work.
+- `Not Ready` - no online workers are polling the work pool, indicating that action is needed before the work pool can accept work.
+- `Paused` - the work pool is paused and work will not be executed until it is unpaused.
+ +![Prefect dashboard showing work pool health](https://user-images.githubusercontent.com/12350579/265874237-7fae81e0-1b1a-460b-9fc5-92d969326d22.png) + +Workers can now have one of two statuses: + +- `Online` - the worker is polling the work pool and is ready to accept work. +- `Offline` - the worker is not polling the work pool and is not ready to accept work. Indicates that the process running the worker has stopped or crashed. + +![worker table showing status](https://user-images.githubusercontent.com/12350579/265815336-c8a03c06-2b48-47c5-be93-1dbde0e5bf0d.png) + +With the introduction of work pool and worker status, we are deprecating work queue health. Work queue health indicators will be removed in a future release. + +See the documentation on [work pool status](https://docs.prefect.io/latest/concepts/work-pools/#work-pool-status) and [worker status](https://docs.prefect.io/latest/concepts/work-pools/#worker-status) for more information. + +See the following pull request for details: + +- +- + +### Removing deprecated Orion references + +Six months ago, we deprecated references to `orion` in our codebase. In this release, we're removing those references. If you still have references to `ORION` in your profile, run `prefect config validate` to automatically convert all of the settings in your _current_ profile to the new names! + +For example: + +```bash +❯ prefect config validate +Updated 'PREFECT_ORION_DATABASE_CONNECTION_URL' to 'PREFECT_API_DATABASE_CONNECTION_URL'. +Configuration valid! +``` + +#### Below is a full guide to the changes + +##### Settings renamed + + - `PREFECT_LOGGING_ORION_ENABLED` → `PREFECT_LOGGING_TO_API_ENABLED` + - `PREFECT_LOGGING_ORION_BATCH_INTERVAL` → `PREFECT_LOGGING_TO_API_BATCH_INTERVAL` + - `PREFECT_LOGGING_ORION_BATCH_SIZE` → `PREFECT_LOGGING_TO_API_BATCH_SIZE` + - `PREFECT_LOGGING_ORION_MAX_LOG_SIZE` → `PREFECT_LOGGING_TO_API_MAX_LOG_SIZE` + - `PREFECT_LOGGING_ORION_WHEN_MISSING_FLOW` → `PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW` + - `PREFECT_ORION_BLOCKS_REGISTER_ON_START` → `PREFECT_API_BLOCKS_REGISTER_ON_START` + - `PREFECT_ORION_DATABASE_CONNECTION_URL` → `PREFECT_API_DATABASE_CONNECTION_URL` + - `PREFECT_ORION_DATABASE_MIGRATE_ON_START` → `PREFECT_API_DATABASE_MIGRATE_ON_START` + - `PREFECT_ORION_DATABASE_TIMEOUT` → `PREFECT_API_DATABASE_TIMEOUT` + - `PREFECT_ORION_DATABASE_CONNECTION_TIMEOUT` → `PREFECT_API_DATABASE_CONNECTION_TIMEOUT` + - `PREFECT_ORION_SERVICES_SCHEDULER_LOOP_SECONDS` → `PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS` + - `PREFECT_ORION_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE` + - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS` + - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS` + - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME` + - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME` + - `PREFECT_ORION_SERVICES_SCHEDULER_INSERT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE` + - `PREFECT_ORION_SERVICES_LATE_RUNS_LOOP_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS` + - `PREFECT_ORION_SERVICES_LATE_RUNS_AFTER_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS` + - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS` + - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS` → 
`PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS`
+ - `PREFECT_ORION_API_DEFAULT_LIMIT` → `PREFECT_API_DEFAULT_LIMIT`
+ - `PREFECT_ORION_API_HOST` → `PREFECT_SERVER_API_HOST`
+ - `PREFECT_ORION_API_PORT` → `PREFECT_SERVER_API_PORT`
+ - `PREFECT_ORION_API_KEEPALIVE_TIMEOUT` → `PREFECT_SERVER_API_KEEPALIVE_TIMEOUT`
+ - `PREFECT_ORION_UI_ENABLED` → `PREFECT_UI_ENABLED`
+ - `PREFECT_ORION_UI_API_URL` → `PREFECT_UI_API_URL`
+ - `PREFECT_ORION_ANALYTICS_ENABLED` → `PREFECT_SERVER_ANALYTICS_ENABLED`
+ - `PREFECT_ORION_SERVICES_SCHEDULER_ENABLED` → `PREFECT_API_SERVICES_SCHEDULER_ENABLED`
+ - `PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED` → `PREFECT_API_SERVICES_LATE_RUNS_ENABLED`
+ - `PREFECT_ORION_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED` → `PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED`
+ - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_ENABLED` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_ENABLED`
+ - `PREFECT_ORION_TASK_CACHE_KEY_MAX_LENGTH` → `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH`
+ - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_ENABLED` → `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_ENABLED`
+
+##### Changes
+
+ - Module `prefect.client.orion` → `prefect.client.orchestration`
+ - Command group `prefect orion` → `prefect server`
+ - Module `prefect.orion` → `prefect.server`
+ - Logger `prefect.orion` → `prefect.server`
+ - Constant `ORION_API_VERSION` → `SERVER_API_VERSION`
+ - Kubernetes deployment template application name changed from `prefect-orion` → `prefect-server`
+ - Command `prefect kubernetes manifest orion` → `prefect kubernetes manifest server`
+ - Log config handler `orion` → `api`
+ - Class `OrionLogWorker` → `APILogWorker`
+ - Class `OrionHandler` → `APILogHandler`
+ - Directory `orion-ui` → `ui`
+ - Class `OrionRouter` → `PrefectRouter`
+ - Class `OrionAPIRoute` → `PrefectAPIRoute`
+ - Class `OrionDBInterface` → `PrefectDBInterface`
+ - Class `OrionClient` → `PrefectClient`
+
+See the following pull request for details:
+
+- Remove deprecated `orion` references —
+
+### Fixes
+
+- Fix an issue with `prefect server start` on Windows -
+
+### Documentation
+
+- Update deployment concept documentation to emphasize server-side deployment —
+- Add Kubernetes guide for deploying worker to Azure AKS —
+- Add information on `--no-prompt` and `PREFECT_CLI_PROMPT` to deployment documentation —
+- Fix broken link to docker guide with redirect and harmonize naming —
+- Remove invalid link in API keys documentation —
+- Update screenshots and CLI log output in quickstart documentation —
+
+**All changes**:
+
+## Release 2.12.1
+
+This release includes some important fixes and enhancements. In particular, it resolves an issue preventing the flow run graph from rendering correctly in some cases.
+
+### Enhancements
+
+- Reduce logging noise on `QueueService` startup failures and item processing failures —
+- Expose a setting for configuring a process limit on served flows —
+
+### Fixes
+
+- Improve failure recovery for websockets —
+- Fix flow run graph rendering issues —
+
+### Documentation
+
+- Update Docker guide to include `flow.serve()` —
+
+### Contributors
+
+- @urimandujano made their first contribution in
+
+**All changes**:
+
+## Release 2.12.0
+
+### Introducing `Flow.serve()`
+
+We're excited to introduce a radically simple way to deploy flows.
+
+The new `.serve()` method available on every flow allows you to take your existing flows and schedule or trigger runs via the Prefect UI and CLI.
+
+This addition makes it easier than it's ever been to deploy flows with Prefect:
+
+```python title="hello.py"
+from prefect import flow
+
+@flow
+def hello(name="Marvin"):
+    print(f"Hello {name}!")
+
+if __name__ == "__main__":
+    # Creates a deployment named 'hello/hourly-greeting'
+    # which will run the 'hello' flow once an hour
+    hello.serve(name="hourly-greeting", interval=3600)
+```
+
+Running this script will start a process that will run the `hello` flow every hour and make it triggerable via the Prefect UI or CLI:
+
+```
+> python hello.py
+╭─────────────────────────────────────────────────────────────────────────────────────╮
+│ Your flow 'hello' is being served and polling for scheduled runs!                    │
+│                                                                                      │
+│ To trigger a run for this flow, use the following command:                           │
+│                                                                                      │
+│         $ prefect deployment run 'hello/hourly-greeting'                             │
+│                                                                                      │
+╰─────────────────────────────────────────────────────────────────────────────────────╯
+```
+
+To start serving your flows, check out our newly updated [quickstart](https://docs.prefect.io/latest/getting-started/quickstart/) and [tutorial](https://docs.prefect.io/latest/tutorial/).
+
+See the following pull requests for details:
+
+-
+-
+-
+-
+
+### A fresh look and feel
+
+The Prefect UI just got a fresh coat of paint! We've carefully updated colors throughout the UI to ensure a more cohesive and visually appealing experience. Whether you're a fan of the light or dark side (or switch between both), you'll notice our interfaces now shine brighter and feel more harmonious. Dive in and explore the new hues!
+
+![Updated Prefect UI in light and dark modes](https://github.com/PrefectHQ/prefect/assets/42048900/c526619c-22d3-44e6-82ee-255ae1233035)
+
+See the following pull requests for implementation details:
+
+-
+-
+-
+-
+-
+
+### Enhancements
+
+- Allow JSON infra overrides via `prefect deploy` —
+- Improve validation for `Flow.name` —
+- Add a Docker image for conda for Python 3.11 —
+- Increase default `PREFECT_API_REQUEST_TIMEOUT` setting to 60 seconds —
+- Remove missing work queue warning from the deployment page —
+- Add `PREFECT_SQLALCHEMY_POOL_SIZE` and `PREFECT_SQLALCHEMY_MAX_OVERFLOW` settings to configure SQLAlchemy connection pool size —
+- Improve format handling of `GitLab` and `Bitbucket` tokens during `git_clone` deployment step —
+- Persist active tabs in Prefect UI pages upon refresh —
+- Add ability to view subflows in the UI that are linked from `run_deployment` with `DaskTaskRunner` and `RayTaskRunner` —
+- Improve CLI output for push work pools
+
+### Fixes
+
+- Pin `anyio` to < 4 in `requirements.txt` —
+- Add upper bounds to core requirements to prevent major version upgrades
+- Fix race condition in concurrent subflow runs involving `AsyncWaiters` —
+- Fix false success from `prefect cloud login` when `PREFECT_API_KEY` is set as an environment variable or is expired —
+- Fix ability to view deployments page tags on larger screens -
+- Properly indent `docker-git` recipe `prefect.yaml` —
+- Fix Slack community invitation link —
+
+### Experimental
+
+- Serialize concurrency requests —
+
+### Documentation
+
+- Detail Kubernetes work pool usage in Kubernetes guide —
+- Add quickstart documentation, simplify welcome page and API reference overview —
+- Add block and agent-based deployments to left-side navigation —
+- Add `Try Prefect Cloud` button to documentation header —
+- Remove blank menu bar in documentation header —
+- Fix link to guide on moving data to and from cloud providers —
+- Shorten push work pools description in guides index —
+- Organize guides index into sections: Development, Execution, Workers and Agents, and Other Guides —
+
+### Contributors
+
+- @mattklein
+
+**All changes**:
+
+## Release 2.11.5
+
+### New Guides
+
+We're happy to announce two new guides to help you get the most out of Prefect!
+
+#### How to move data to and from cloud providers
+
+Moving data to cloud-based storage and retrieving it is crucial in many data engineering setups. [This guide](https://docs.prefect.io/latest/guides/moving-data/) provides step-by-step instructions to seamlessly integrate and interact with popular cloud services like AWS, Azure, and GCP.
+
+#### Running flows with Kubernetes
+
+For those aiming to optimize their flows using Kubernetes, [this guide](https://docs.prefect.io/latest/guides/deployment/kubernetes/) provides a deep dive into efficiently running flows on Kubernetes using containers, with insights for both novices and seasoned experts.
+
+See the following pull requests for details:
+
+-
+-
+-
+
+### Enhancements
+
+- Warn users upon setting a misconfigured `PREFECT_API_URL` —
+- Show CLI warning if worker is polling a paused work pool or queue —
+- Optimize the query generated by the `/task_runs` endpoint —
+- Extend optimization on `/task_runs` endpoint to include safety guard —
+- Add `DiscordWebhook` notification block —
+- Remove reference to deprecated `prefect project ls` in interactive `prefect deploy` command —
+
+### Fixes
+
+- Remove base job template validation when work pools are read —
+
+### Experimental
+
+- Codify concurrency context managers and rate limiting with tests —
+
+### Documentation
+
+- Add reference to workers in flows documentation admonition —
+- Combine Kubernetes worker and flows pages —
+- Remove references to `flow_name` from deployments documentation —
+- Improve readability of Kubernetes guide —
+- Fix typos in contribution and host documentation —
+- Raise visibility of push work pools documentation —
+- Fix heading size, remove unnecessary link in deployments documentation —
+- Add GCP-specific guide for deploying a GKE cluster to host a worker —
+- Fix typo in `prefect-gcs` deployment example —
+- Move guide on upgrading from agents to workers —
+- Fix grammatical errors in documentation —
+- Clarify deployments variables and fix `prefect.yaml` example —
+- Update `README` header image with new Prefect branding —
+
+### Contributors
+
+- @mattklein made their first contribution in
+- @vishalsanfran made their first contribution in
+- @AmanSal1
+- @mj0nez
+
+**All changes**:
+
+## Release 2.11.4
+
+### Guide to upgrade from agents to workers
+
+Upgrading to workers significantly enhances the experience of deploying flows. It simplifies the specification of each flow's infrastructure and runtime environment.
+
+A [worker](/concepts/work-pools/#worker-overview) is the fusion of an [agent](/concepts/agents/) with an [infrastructure block](/concepts/infrastructure/). Like agents, workers poll a work pool for flow runs that are scheduled to start. Like infrastructure blocks, workers are typed - they work with only one kind of infrastructure and they specify the default configuration for jobs submitted to that infrastructure.
+
+We've written [a handy guide](https://docs.prefect.io/latest/guides/upgrade-guide-agents-to-workers/) that describes how to upgrade from agents to workers in just a few quick steps.
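+
+As a quick illustration of the shape of that change (the queue and pool names here are hypothetical), the upgrade boils down to swapping an agent process for a typed worker:
+
+```bash
+# Before: an agent polling a work queue
+prefect agent start -q my-queue
+
+# After: a typed worker polling a work pool
+prefect worker start --pool my-process-pool
+```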
+
+### Visualize your flow before running it
+
+Until now, the only way to produce a visual schematic of a flow has been to run it and view the corresponding flow run page in the Prefect UI. Some flows, though, are time-consuming or expensive to run. Now, you can get a quick sense of the structure of your flow using the `.visualize()` method. Calling this method will attempt to locally produce an image of the flow's schematic diagram without running the flow's code.
+
+![viz-return-value-tracked](https://github.com/PrefectHQ/prefect/assets/3407835/325ef46e-82ce-4400-93d2-b3110c805116)
+
+See the [flows documentation](https://docs.prefect.io/latest/concepts/flows/#visualizing-flow-structure) or the [pull request](https://github.com/PrefectHQ/prefect/pull/10417) for more information.
+
+### Enhancements
+
+- Update `prefect deploy` to skip building docker image prompt if `build` key explicitly set to null in `prefect.yaml` —
+- Handle spot instance eviction in Kubernetes Infrastructure Block —
+
+### Fixes
+
+- Reduce wait time between tasks by adding a clause to the visiting function to raise if it encounters a quote annotation —
+- Enable dashboard filters to update with each polling interval so the 24h time span constantly updates —
+- Resolve issue with validation of templated variables in base job template of work pool —
+- Update CLI to refer to a "work pool" instead of a "worker pool" —
+
+### Documentation
+
+- Elevate Guides in navigation and remove migration guide —
+- Update notes about community support —
+- Update concepts page to clean up table and remove unnecessary header —
+- Improve headings on deployments concept page —
+- Update the storage guide for Bitbucket to add `x-token-auth` —
+- Add Planetary Computer collection —
+- Highlight `@flow` decorator instead of function in tutorial —
+- Update tutorial summary list —
+- Update Cloud connection guide to include whitelisting URLs —
+- Update code snippets and highlighting in tutorial —
+- Remove "Reference Material" section from tutorial —
+- Fix typo in schedules concept page —
+- Fix typo on artifacts concept page —
+
+### Contributors
+
+- @shahrukhx01 made their first contribution in
+- @giorgiobasile
+- @marwan116
+
+**All changes**:
+
+## Release 2.11.3
+
+### Enhanced support for environment variables in `run_shell_script` step
+
+Previously, to expand environment variables in the `run_shell_script` step, you had to enclose your scripts in `bash -c`. We have simplified this process by introducing a new field: `expand_env_vars`. By setting this field to `true`, you can pass environment variables directly to your script.
+
+Consider the following example, where the script utilizes the `$USER` and `$HOME` environment variables:
+
+```yaml
+pull:
+    - prefect.deployments.steps.run_shell_script:
+        script: |
+            echo "User: $USER"
+            echo "Home Directory: $HOME"
+        stream_output: true
+        expand_env_vars: true
+```
+
+For implementation details, see the following pull request:
+
+-
+
+### Enhancements
+
+- Change language for `--ci` option in `prefect deploy --help` —
+
+### Experimental
+
+- Port concurrency limit v2 API and modeling from Prefect Cloud —
+
+### Documentation
+
+- Add Prefect Cloud quickstart to navigation menu —
+- Fix typo in deployments documentation —
+- Reorganize concepts pages —
+
+### Contributors
+
+- @AmanSal1
+
+**All changes**:
+
+## Release 2.11.2
+
+### Enhancements
+
+- Explicitly set all calls to `pendulum.now()` to "UTC" —
+
+### Documentation
+
+- Add guide for specifying storage for deployments —
+- Add ACI push work pool guide —
+- Move some concepts and cloud pages to guides section —
+
+### Deprecations
+
+- Deprecate `FlowRunCreate.deployment_id` —
+
+### Contributors
+
+- @psofiterol made their first contribution in
+
+**All changes**:
+
+## Release 2.11.1
+
+### Enhancements
+
+- Add `work_queue_name` field when creating a flow run for a deployment, enabling the queue setting to be overridden on a per-run basis —
+- Prevent accidental credential logging on BindFailure by logging only a list of key names, but not the values —
+- Allow task runs to explicitly return `Paused` states, thereby pausing the flow run using the same settings —
+
+### Fixes
+
+- Hide links to work queues for push work pools —
+- Fix issue with `Pause` state fields —
+- Fix issue with flow run logs missing until after refresh —
+
+### Experimental
+
+- Add a general use concurrency context manager —
+- Add `rate_limit` function to block execution while acquiring slots —
+
+### Documentation
+
+- Add redirect to quickstart page —
+- Add missing quotation mark in docstring —
+- Fix `run_deployment` docstring rendering —
+- Fix typo in deployment docs —
+
+### Contributors
+
+- @Sche7 made their first contribution in
+- @LennyArdiles made their first contribution in
+- @Akshat0410 made their first contribution in
+
+**All changes**:
+
+## Release 2.11.0
+
+### Flow summary graphs and stats
+
+Each flow page now includes graphs of its recent flow runs, task runs, and (in Prefect Cloud) related events, as well as summary statistics!
+
+Flow details have been moved to a dedicated tab. For implementation details, see the following pull request:
+
+-
+
+### Work pools and workers are now generally available
+
+Since first being introduced in Prefect 2.10.0, Prefect [workers and work pools](https://docs.prefect.io/2.10.21/concepts/work-pools/) have come a long way. There are now work pools for every major infrastructure type. Work pools expose rich configuration of their infrastructure. Every work pool type has a base configuration with sensible defaults such that you can begin executing work with just a single command. The infrastructure configuration is fully customizable from the Prefect UI.
+
+Push work pools, recently released in Prefect Cloud, remain a beta feature.
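+
+For a rough sketch of that single-command experience with a non-push pool (the pool name and type here are illustrative):
+
+```bash
+# Create a work pool that runs flows as local subprocesses
+prefect work-pool create my-process-pool --type process
+
+# Start a worker that polls the pool for scheduled work
+prefect worker start --pool my-process-pool
+```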
+
+For implementation details, see the following pull requests:
+
+-
+-
+
+### Enhancements
+
+- Use `orjson_dumps_extra_compatible` when serializing in `build_from_flow` —
+
+### Fixes
+
+- Make `resolve_futures_to_data` function raise on failure by default —
+- Fix flow runs page not polling for new runs and not loading more flow runs when scrolling —
+- Don't create DB default during settings load —
+- Fix issues causing flow runs to be incorrectly marked as failed —
+- Fix incorrect path in error message —
+- Fix `LocalFileSystem.get_directory` with basepath behaviour —
+- Fix Dashboard refresh cadence —
+
+### Documentation
+
+- Add undocumented runtime parameters —
+- Add Deployment Quickstart —
+- Add guide for setting up a push work pool —
+- Add guide for deploying a flow using Docker —
+- Edit install and quickstart pages for clarity —
+- Update automations screenshots —
+- Fix typos on Deployment Management page —
+- Fix flow retries example —
+- Fix missing document title and add terminal login section —
+
+### Contributors
+
+- @dbentall made their first contribution in
+- @mesejo
+
+**All changes**:
+
+## Release 2.10.21
+
+### The Prefect Dashboard - your heads-up display
+
+The response to the experimental Prefect dashboard was so enthusiastic that we've made it generally available as the default landing page in the Prefect UI. The dashboard provides an overview of all Prefect activity, surfaces the urgent information, and provides the context to understand that information. With the dashboard, you can:
+
+- Confirm that all flows run in the past 24 hours behaved as expected
+- Identify a flow run that recently failed and jump directly to its page
+- See a work pool that is unhealthy and the work that is impacted
+
+### Deploy deployments prefixed by flow name during `prefect deploy`
+
+You can now target a specific deployment by prefixing the deployment name with the flow name.
+
+For example, the following command creates a deployment with the name `my-deployment` for a flow with the name `my-flow`:
+
+```bash
+prefect deploy --name my-flow/my-deployment
+```
+
+This is especially useful when you have several flows with deployments that have the same name.
+
+For implementation details, see the following pull request:
+
+-
+
+### Use environment variables in deployment steps
+
+Prefect now supports the usage of environment variables in deployment steps, allowing you to access environment variables during the `pull` action at runtime or during the `build` and `push` actions when running `prefect deploy`. Particularly useful for CI/CD builds, this makes Prefect deployments more versatile.
+
+For example, you can now use the following syntax to set an image tag of a Dockerized build by loading an environment variable during the `build` action:
+
+```yaml
+build:
+    - prefect_docker.deployments.steps.build_docker_image:
+        requires: prefect-docker>0.1.0
+        image_name: my-image/orion
+        tag: "{{ $CUSTOM_TAG }}"
+```
+
+You can also use environment variables inside steps.
+
+For example:
+
+```yaml
+- prefect.deployments.steps.run_shell_script:
+    script: echo "test-'{{ $PREFECT_API_URL }}'"
+    stream_output: true
+```
+
+For implementation details, see the following pull request:
+
+-
+
+### Use `prefect deploy` with multiple deployments with the same name
+
+When there are multiple deployments with the same name, the `prefect deploy` command now prompts you to choose which one to deploy.
+
+For example, if you have the following `prefect.yaml`:
+
+```yaml
+deployments:
+  - name: "default"
+    entrypoint: "flows/hello.py:hello"
+
+  - name: "default"
+    entrypoint: "flows/hello.py:hello_parallel"
+```
+
+running `prefect deploy -n default` will now prompt you to choose which flow to create a deployment for.
+
+For implementation details, see the following pull request:
+
+-
+
+### Enhancements
+
+- Enable workspace dashboard by default —
+- Add `SendgridEmail` notification block —
+- Raise state change hook errors during creation if not correctly formatted —
+- Improve `prefect deploy` nonexistent entrypoint `ValueError` -
+- Truncate row length in interactive `prefect deploy` table display -
+- Add `prefect.runtime.flow_run.parent_flow_run_id` and `prefect.runtime.flow_run.parent_deployment_id` -
+
+### Fixes
+
+- Add handling for failed Kubernetes jobs —
+
+### Documentation
+
+- Fix formatting in `mkdocs.yml` —
+- Fix link to API docs in automations documentation —
+- Remove the duplicate listing in installation documentation —
+- Fix example in proactive trigger documentation —
+- Remove references to nonexistent `prefect profile get` -
+
+### Contributors
+
+- @rkscodes
+
+- @Ishankoradia made their first contribution in
+- @bsenst made their first contribution in
+
+**All changes**:
+
+## Release 2.10.20
+
+### Resolving UI form input issues
+
+This release resolves bugs preventing UI form inputs from being rendered and parsed correctly, including:
+
+- Dates & times —
+- List values —
+- JSON fields —
+
+### Prefect no longer supports Python 3.7
+
+Python 3.7 reached end-of-life on 27 June 2023. Consistent with our warning, this release drops Python 3.7 support. Prefect now requires Python 3.8 or later.
+
+### Enhancements
+
+- Add UUID validation for webhook CLI commands to raise errors earlier and more clearly —
+- Clarify Dockerfile rename prompt in `prefect deploy` —
+- Improve `prefect deploy` error message —
+- Add `work_pool_name` to `Deployment` docstring —
+
+### Contributors
+
+- @toby-coleman
+
+**All changes**:
+
+## Release 2.10.19
+
+### Peer into the future with the experimental dashboard
+
+We're excited to make the new Prefect dashboard available as an experimental feature. The dashboard provides an overview of all Prefect activity, surfaces the urgent information, and provides the context to understand that information. With the dashboard, you can:
+
+- Confirm that all flows run in the past 24 hours behaved as expected
+- Identify a flow run that recently failed and jump directly to its page
+- See a work pool that is unhealthy and the work that is impacted
+
+You can enable the new dashboard by running `prefect config set PREFECT_EXPERIMENTAL_ENABLE_WORKSPACE_DASHBOARD=True` in your terminal.
+
+See [this pull request](https://github.com/PrefectHQ/prefect/pull/10152) for implementation details.
+
+### Improvements to `git_clone` deployment pull step
+
+Previously, users had to apply the appropriate format for their service credentials in a `Secret` block using the `access_token` field in `git_clone`.
+The `git_clone` pull step now includes an additional `credentials` field, allowing users to leverage their existing `GitHubCredentials`, `GitLabCredentials`, or `BitBucketCredentials` blocks when cloning from a private repository. For examples of providing credentials, see the [updated documentation](https://docs.prefect.io/2.10.19/concepts/deployments-ux/#the-pull-action).
+
+For implementation details, see:
+
+-
+
+### Fixes
+
+- Improve language in `prefect deploy` to not recommend deprecated `-f/--flow` —
+- Pin Pydantic to v1 in `requirements.txt` —
+- Add default value of `None` for `WorkQueue.work_pool_id` —
+
+### Documentation
+
+- Update `git_clone` documentation with examples of using credentials field -
+- Add documentation on deleting blocks —
+- Add docs tabs linking and styling —
+- Fix example in `Block.load` docstring —
+- Fix task tutorial documentation example —
+- Clarify heading in rate limits documentation —
+- Fix link in events documentation —
+- Remove outdated disclaimer about configuring webhooks with the Prefect Cloud UI —
+
+### Integrations
+
+- Add `prefect-earthdata` integration —
+
+### Contributors
+
+- @rkscodes
+- @StefanBRas
+
+- @JordonMaule made their first contribution in
+
+- @AmanSal1 made their first contribution in
+- @giorgiobasile made their first contribution in
+
+**All changes**:
+
+## Release 2.10.18
+
+### Docker image support during flow deployment
+
+We enhanced support for Docker-based infrastructures when deploying flows through the interactive `prefect deploy` experience. Users can now easily custom-build or auto-build Docker images and push them to remote registries if they so choose.
+
+The CLI automatically detects if a work pool supports Docker images (e.g., docker, ecs, cloud-run) during `prefect deploy` and will now guide the user through the experience of building and pushing a Docker image if support is detected.
+
+This enhancement to managing deployments will greatly simplify the process of creating `build` and `push` steps for deployments.
+
+We will also create a `pull` step for you when you choose to build a Docker image through `prefect deploy`. Whether you have your own Dockerfile or use the auto-build feature in `build_docker_image`, the generated `pull` step helps you set the correct path to your flow code.
+
+See the following pull requests for implementation details:
+
+-
+-
+
+### Event-driven deployments with triggers
+
+You can now easily incorporate event-based triggers into your Prefect Cloud deployments - simply add triggers to your `prefect.yaml` file or directly from the Prefect UI deployment page. Deployment triggers utilize automations - any automation that runs flows from a given deployment will be reflected on that deployment page.
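+
+As a rough sketch of what a trigger can look like in `prefect.yaml` (the deployment name, entrypoint, resource id, and event name below are all illustrative; consult the deployment triggers documentation for the exact schema):
+
+```yaml
+deployments:
+  - name: my-deployment            # illustrative deployment name
+    entrypoint: flows/etl.py:etl   # illustrative entrypoint
+    triggers:
+      - enabled: true
+        match:
+          prefect.resource.id: my.external.resource  # illustrative resource id
+        expect:
+          - external.resource.pinged                 # illustrative event name
+```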
+
+See the following pull requests for implementation details:
+
+-
+-
+
+### Enhancements
+
+- Allow saving of updated deployment configurations —
+- Add `--install-policy` option to `prefect worker start` -
+- Update Docker-based `prefect init` recipes to use `push_docker_image` step —
+
+### Fixes
+
+- Fix deployment `pull` step saving by preserving placeholders with missing values —
+- Fix `prefect server start` and `prefect agent start` on Windows —
+- Add ability to use Prefect variables in `job_variables` section of deploy config in `prefect.yaml` —
+- Add default option to `new_parameters.pop` in `explode_variadic_parameter` used to handle `**kwargs` in task mapping —
+- Skip schedule prompts in `prefect deploy` if schedule is set or null in `prefect.yaml` —
+- Fix saving of `pull` and `push` step deployment configuration —
+- Fix issue hosting and running the UI in unsecured contexts -
+
+### Documentation
+
+- Adjust docs to reflect that Prefect requires Python 3.8 —
+- Add custom `pull` step examples to deployment management docs —
+- Add troubleshooting guide to docs —
+- Add information on finding Prefect Cloud account id and workspace id —
+- Reference webhooks documentation from events documentation —
+- Simplify deployment description in docs —
+
+### Contributors
+
+- @garylavayou made their first contribution in
+- @themattmorris made their first contribution in
+- @NodeJSmith
+- @rpeden
+
+**All changes**:
+
+## Release 2.10.17
+
+### Improved Prefect tutorial
+
+Prefect's documentation has an [improved tutorial](https://docs.prefect.io/2.10.17/tutorial/), redesigned to include Prefect's recent enhancements. With the introduction of work pools and the interactive deployment CLI, the new tutorial reflects the elevated experience that these new features offer, alongside the key elements and features of Prefect. You can find content related to more advanced features or less common use cases in the [Guides](https://docs.prefect.io/2.10.17/guides/) section.
+
+### Enhancements
+
+- Update Prefect client to follow redirects by default —
+- Always show checkboxes on list items, rather than animating them on hover —
+- New `CustomWebhookNotificationBlock` for triggering custom webhooks in response to flow run state changes —
+
+### Fixes
+
+- Limit the number of files concurrently opened by `prefect deploy` when searching for flows —
+- Fix `TypeError: crypto.randomUUID is not a function` that caused pages to break —
+
+### Documentation
+
+- Fix broken link to `prefect-docker` documentation on the deployments UX page —
+- Document `--work-queue / -q` arguments to `worker start` command —
+- Add link to join Club 42 to Community page —
+- Improve Prefect tutorial to be more succinct and purposeful —
+
+### Contributors
+
+- @eclark9270 made their first contribution in
+
+- @AutumnSun1996 made their first contribution in
+- @dianaclarke made their first contribution in
+
+**All changes**:
+
+## Release 2.10.16
+
+### Run `prefect deploy` without providing a flow entrypoint
+
+We're making it easier than ever to deploy your first flow! Previously, you needed to pass a flow entrypoint to `prefect deploy` to deploy a specific flow. Now, you can simply run `prefect deploy` and the interactive CLI will guide you through the process of selecting a flow to deploy!
+
+![flow selector example](https://user-images.githubusercontent.com/12350579/247144440-d89916d4-cbf1-408e-9959-45df94a35f8d.png)
+
+For more details on implementation, see the following pull request:
+
+-
+
+### Enhancements
+
+- Add option to specify work queue priority during creation from CLI —
+- Improve 'Invalid timezone' error message —
+
+### Fixes
+
+- Fix wrong key used in generated `git_clone` step —
+
+### Deprecations
+
+- Deprecate `prefect deploy` `--ci` flag —
+
+### Documentation
+
+- Resolve missing image in Prefect Cloud event documentation —
+- Fix typo in webhooks documentation —
+
+### Integrations
+
+- Fix bug in `KubernetesWorker` where flow runs crashed during submission -
+
+### Contributors
+
+- @kkdenk made their first contribution in
+- @rito-sixt
+
+**All changes**:
+
+## Release 2.10.15
+
+### Introducing deployment configuration saving in `prefect deploy`
+
+We are excited to announce a significant enhancement to our `prefect deploy` command to make your deployment process even more intuitive.
+
+Previously, users had to recall their deployment configurations each time they wanted to redeploy with the same settings. Recognizing this potential inconvenience, we've now incorporated a feature to save your deployment inputs for future use, thereby streamlining redeployments.
+
+The new interactive `prefect deploy` command guides you through the deployment process, from setting the schedule and the work pool to the `pull` step. After your deployment is created, you will have the option to save your inputs. Choosing "yes" will create a `prefect.yaml` file if one does not exist. The `prefect.yaml` file will contain your inputs stored in the deployments list and the generated `pull` step.
+
+![saving with prefect deploy demo](https://github.com/PrefectHQ/prefect/assets/12350579/47d30cee-b0db-42c8-9d35-d7b25cd7856c)
+
+If you have a `prefect.yaml` file in the same directory where you run your command, running the `deploy` command again allows you to reuse the saved deployment configuration or create a new one. If you choose to create a new deployment, you will again be given the option to save your inputs. This way, you can maintain a list of multiple deployment configurations, ready to be used whenever needed!
+
+For more details on implementation, see the following pull request:
+
+-
+
+### Fixes
+
+- Fix error in `prefect deploy` when `.prefect` folder is absent —
+- Fix use of deprecated `git_clone_project` —
+- Fix exception raised in `prefect init` command when no recipe is selected —
+
+### Documentation
+
+- Fix broken deployments api-ref page —
+
+**All changes**:
+
+## Release 2.10.14
+
+### Simplifying project-based deployments
+
+We've now simplified deployment management even further by consolidating the `prefect.yaml` and `deployment.yaml` files and removing the creation of the `.prefect` folder when running `prefect init`. We've also deprecated the name `projects`, renaming steps that had `projects` in the name.
+
+For example:
+
+```yaml
+pull:
+    - prefect.projects.steps.git_clone_project:
+        id: clone-step
+        repository: https://github.com/org/repo.git
+```
+
+is now
+
+```yaml
+pull:
+    - prefect.deployments.steps.git_clone:
+        id: clone-step
+        repository: https://github.com/org/repo.git
+```
+
+An example using the `prefect_gcp` library:
+
+```yaml
+build:
+    - prefect_gcp.projects.steps.push_project_to_gcs:
+        requires: prefect-gcp
+        bucket: my-bucket
+        folder: my-project
+```
+
+is now
+
+```yaml
+build:
+    - prefect_gcp.deployments.steps.push_to_gcs:
+        requires: prefect-gcp
+        bucket: my-bucket
+        folder: my-project
+```
+
+In addition, we've removed the need to use the `project` command group through the CLI. Now, instead of `prefect project init` you can simply run `prefect init`. To use a deployment configuration recipe during initialization, you no longer need to run a `prefect project` command. Running `prefect init` will guide you through an interactive experience to choose a recipe if you so desire.
+
+![prefect init recipe interaction](https://github.com/PrefectHQ/prefect/assets/42048900/c2bea9b4-4e1f-4029-8772-50ecde6073a7)
+
+We have also deprecated deploying a flow via flow name (`-f`), allowing a single, streamlined way to deploy:
+
+```bash
+prefect deploy ./path/to/flow.py:flow-fn-name
+```
+
+See these pull requests for implementation details:
+
+-
+-
+-
+-
+-
+-
+-
+-
+
+### Prefect Cloud Webhook CLI
+
+[Webhooks on Prefect Cloud](https://docs.prefect.io/2.10.14/cloud/webhooks/) allow you to capture events from a wide variety of sources in your data stack, translating them into actionable Prefect events in your workspace. Produce Prefect events from any system that can make an HTTP request and use those events in automations or to trigger event-driven deployments.
+
+Even if you have minimal control over the systems you're integrating with, Prefect Cloud webhooks give you [full programmable control](https://docs.prefect.io/2.10.14/cloud/webhooks/#webhook-templates) over how you transform incoming HTTP requests into Prefect events with Jinja2 templating. We even have a [built-in preset for CloudEvents](https://docs.prefect.io/2.10.14/cloud/webhooks/#accepting-cloudevents).
+
+Webhooks are currently available [via the API and `prefect` CLI](https://docs.prefect.io/2.10.14/cloud/webhooks/#configuring-webhooks).
+
+You can create your first Cloud webhook via the CLI like so:
+
+```bash
+prefect cloud webhook create your-webhook-name \
+    --description "Receives webhooks from your system" \
+    --template '{ "event": "your.event.name", "resource": { "prefect.resource.id": "your.resource.id" } }'
+```
+
+See the following pull request for implementation details:
+
+-
+
+### Enhancements
+
+- Make related automations visible from `prefect deployment inspect` —
+- Enable deleting blocks with Python SDK —
+- Enhance ability to delete a single flow on the flows page -
+- Add `work_pool_name` to work queue API responses —
+- Add httpx request method to Prefect Cloud client —
+- Mark flow as crashed if infrastructure submission fails —
+- Re-enable the retrieval of existing clients from flow and task run contexts when safe —
+- Add `prefect --prompt/--no-prompt` to force toggle interactive CLI sessions —
+- Return sorted task run ids when inspecting concurrency limit via CLI —
+- Use existing thread in `BatchedQueueService` to reduce queue retrieval overhead —
+
+### Fixes
+
+- Provide a default `DTSTART` to anchor `RRULE` schedules to ensure extra schedules are not created —
+- Fix bug where attribute error raised on service shutdown when the app startup fails —
+- Improve retry behavior when SQLite database locked —
+
+### Documentation
+
+- Add tip on `PREFECT_API_URL` setting for workers and agents —
+- Add deployment triggers documentation —
+- Add more detailed documentation to the engine api-ref —
+- Add note on matching on multiple resources when using automations —
+- Update automations examples in docs —
+- Update Prefect Cloud users documentation on user settings —
+- Boost non-API docs pages to optimize search results —
+- Update testing documentation tag —
+- Exemplify how to import Prefect client —
+
+### Contributors
+
+- @Hongbo-Miao
+- @rito-sixt made their first contribution in
+- @drpin2341 made their first contribution in
+- @amansal1 made their first contribution in
+
+**All changes**:
+
+## Release 2.10.13
+
+### Improvements to projects-based deployments
+
+![prefect deploy output with interactive cron schedule](https://github.com/PrefectHQ/prefect/assets/12350579/c94f45e6-3b7a-4356-84cd-f36a29f0415c)
+
+Project-based deployments are now easier to use, especially for first-time users! You can now run `prefect deploy` without first initializing a project. If you run `prefect deploy` without a project initialized, the CLI will generate a default pull step that your worker can use to retrieve your flow code when executing scheduled flow runs. The `prefect deploy` command will also prompt you with scheduling options, making it even easier to schedule your flows!
+
+See these two pull requests for implementation details:
+
+-
+-
+
+This release also adds two new deployment steps: `pip_install_requirements` and `run_shell_script`. Both of these are new 'utility' deployment steps that can be used to automate portions of your deployment process.
+
+Use the `pip_install_requirements` step to install Python dependencies before kicking off a flow run:
+
+```yaml
+pull:
+    - prefect.projects.steps.git_clone_project:
+        id: clone-step
+        repository: https://github.com/org/repo.git
+    - prefect.projects.steps.pip_install_requirements:
+        directory: "{{ clone-step.directory }}"
+        requirements_file: requirements.txt
+        stream_output: False
+```
+
+Use the `run_shell_script` step to grab your repository's commit hash and use it to tag your Docker image:
+
+```yaml
+build:
+    - prefect.projects.steps.run_shell_script:
+        id: get-commit-hash
+        script: git rev-parse --short HEAD
+        stream_output: false
+    - prefect.projects.steps.build_docker_image:
+        requires: prefect-docker
+        image_name: my-image
+        image_tag: "{{ get-commit-hash.stdout }}"
+        dockerfile: auto
+```
+
+See these two pull requests for implementation details:
+
+-
+-
+
+### Enhancements
+
+- Allow project `pull` steps to pass step outputs —
+- Update work queue health indicators in Prefect UI for greater clarity -
+- State messages no longer include tracebacks —
+- Allow passing a payload to `emit_instance_method_called_event` -
+
+### Fixes
+
+- Reference `.prefectignore` files when moving files around locally -
+- Fix typo in warning message raised when flow is called during script loading —
+- Allow creation of identical block names between different block types -
+- Ensure flow timeouts do not override existing alarm signal handlers —
+- Ensure timeout tracking begins from the actual start of the call, rather than the scheduled start —
+- Ensure timeout monitoring threads immediately exit upon run completion —
+- Fix bug where background services could throw logging errors on interpreter exit —
+- Fix bug where asynchronous timeout enforcement could deadlock —
+
+### Documentation
+
+- Add documentation on Prefect Cloud webhook usage -
+- Fix broken link and Prefect server reference in Cloud docs —
+- Fix broken link to Docker guide in API reference docs —
+- Update subflow run cancellation information in flows concept doc —
+- Improve ability to give feedback on documentation —
+- Add projects deployment diagram to work pool, workers & agents concept doc —
+- Add missing Prefect Server URL in API reference docs —
+- Fix code typo in task runners concept doc —
+- Add documentation on flow run parameter size limit —
+- Fix link to orchestration tutorial in execution tutorial -
+
+### Contributors
+
+- @ac1997 made their first contribution in
+
+**All changes**:
+
+## Release 2.10.12
+
+### The deployments page is back
+
+We got a lot of positive feedback about the new flows page that was redesigned to include deployments, but several users pointed out that it wasn't quite a full replacement for the dedicated deployments page. The deployments page has been re-added to the navigation menu until the new flows page is a worthy substitute.
+
+See the [pull request](https://github.com/PrefectHQ/prefect/pull/9800) for implementation details.
+
+### Enhancements
+
+- All server-side schemas now have dedicated client-side duplicates —
+- Import of `prefect.server` is delayed to improve CLI start time and `import prefect` time —
+- Add task run as a related object to emitted events —
+- Emit task run state change events when orchestrating a task run —
+- Add healthcheck webserver to workers —
+- Create files and directories with user-scoped permissions —
+- Runtime variables mocked with environment variables for testing are now coerced to the correct type —
+
+### Fixes
+
+- Show 404 instead of blank page in UI if flow run id is invalid or flow run is missing —
+- Fix bug where event loop shutdown hooks could fail due to early garbage collection —
+- Fix process worker `documentation_url` —
+- Fix bug where given priority was ignored when creating a work queue —
+- Fix inconsistent work queue handling by agent when cancelling flow runs —
+
+### Experimental
+
+- Add `dashboard` experiment via `ENABLE_WORKSPACE_DASHBOARD` —
+
+### Deprecations
+
+- Deprecate `create_orion_api` in favor of `create_api_app` —
+- Deprecate "send_to_orion" logging option in favor of "send_to_api" —
+
+### Documentation
+
+- Add descriptions to concept tables —
+- Remove unreferenced `requests` import in 'real-world example' —
+- Add state change hooks to guides overview page —
+- Fix typo in flows and tasks tutorials —
+- Update task docs to reference common params and link to all params —
+- Add Google Analytics to documentation —
+- Remove outdated announcement —
+- Add extra loggers example —
+- Clarify work pool priority options —
+- Update worker requirements in projects tutorial —
+- Fix default value comment in docs/concepts/variables —
+- Fix formatting of link to Ray page —
+- Add "book a rubber duck" links —
+
+### Contributors
+
+- @marco-buttu made their first contribution in
+- @jcozar87 made their first contribution in
+- @rmorshea
+
+**All changes**:
+
+## Release 2.10.11
+
+### Interactive Deployments and Work Pool Wizard 🧙
+
+This release simplifies deployment and work pool creation.
+
+![interactive-prefect-deploy-console-output](https://github.com/PrefectHQ/prefect/assets/12350579/c861b8dd-2dbb-4cfa-82f9-69008714f9fe)
+
+Firstly, the `prefect deploy` command has been upgraded to provide interactive prompts for deployment names and work pool selections. If you don't provide a deployment name via the CLI or a `deployment.yaml` file, the CLI will prompt you to do so. Furthermore, if a work pool name isn't specified, the CLI will guide you through the available work pools for your workspace. This feature aims to make deployments more approachable, especially for first-time users, requiring just an entrypoint to a flow to get started.
+
+![work-pool-wizard-infrastructure-choices](https://github.com/PrefectHQ/prefect/assets/12350579/383f004b-816e-4a52-98c3-46745e273362)
+
+Secondly, we've added a work pool creation wizard to streamline the process and spotlight various infrastructure types. The wizard will walk you through the essentials: basic work pool info, infrastructure type, and infrastructure configuration. The infrastructure type step will present you with a list of available infrastructure types, each with an icon and a description.
+
+Together, these improvements offer an interactive, guided experience that not only simplifies deployments and work pool creation but also empowers users to navigate the process confidently and efficiently.
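+
+As a minimal illustration (the entrypoint below is hypothetical), you can now start from nothing more than a flow entrypoint and let the prompts fill in the rest:
+
+```bash
+# With no deployment name or work pool supplied, the CLI prompts for both
+prefect deploy ./flows/hello.py:hello
+```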
+
+Check out these pull requests for more details:
+
+-
+-
+-
+
+### Enhancements
+
+- Emit events from deployments, work queues, and work pools —
+- Improve SQLite database transaction behavior —
+- Add support for SQLAlchemy 2 —
+- Add `on_cancellation` flow run state change hook —
+- Improve cancellation cleanup service iteration over subflow runs —
+- Add request retry support to Prefect Cloud client —
+- Add `PREFECT_CLIENT_MAX_RETRIES` for configuration of maximum HTTP request retries —
+- Add an `/api/ready` endpoint to the Prefect server to check database connectivity —
+- Display URL to flow run on creation —
+- Add guard against changing the profile path from `prefect config set` —
+- Use flow run logger to report traceback for failed submissions —
+- Improve default Prefect image tag when using development versions —
+- Emit worker event when a flow run is scheduled to run or cancel —
+- Add ability to filter for `Retrying` state in the Task Runs tab of the Prefect UI —
+
+### Fixes
+
+- Display CLI deprecation warnings to STDERR instead of STDOUT —
+- Fix hanging flow runs from deployments when variables are retrieved in base scope —
+- Fix maximum character length when updating variables —
+- Fix bug where agents would fail when processing runs with deleted deployments —
+- Fix bug where `uvicorn` could not be found when the server was started from an unloaded virtual environment —
+- Allow table artifacts' `table` argument to be a list of lists —
+- Fix bug where events worker would fail if the API URL includes a trailing `/` —
+- Fix bug where flow run timeline crashed when custom state names were used —
+
+### Collections
+
+- Stream Kubernetes Worker flow run logs to the API — [#72](https://github.com/PrefectHQ/prefect-kubernetes/pull/72)
+- Stream ECS Worker flow run logs to the API — [#267](https://github.com/PrefectHQ/prefect-aws/pull/267)
+- Stream Cloud Run Worker flow run logs to the API — [#183](https://github.com/PrefectHQ/prefect-gcp/pull/183)
+- Add `prefect-spark-on-k8s-operator` to integrations catalog list — [#9029](https://github.com/PrefectHQ/prefect/pull/9029)
+- Add optional `accelerator_count` property for `VertexAICustomTrainingJob` — [#174](https://github.com/PrefectHQ/prefect-gcp/pull/174)
+- Add `result_transformer` parameter to customize the return structure of `bigquery_query` — [#176](https://github.com/PrefectHQ/prefect-gcp/pull/176)
+- Add `boot_disk_type` and `boot_disk_size_gb` properties for `VertexAICustomTrainingJob` — [#177](https://github.com/PrefectHQ/prefect-gcp/pull/177)
+- Fix bug where incorrect credentials model was selected when `MinIOCredentials` was used with `S3Bucket` — [#254](https://github.com/PrefectHQ/prefect-aws/pull/254)
+- Fix bug where `S3Bucket.list_objects` was truncating prefix paths ending with slashes — [#263](https://github.com/PrefectHQ/prefect-aws/pull/263)
+- Fix bug where ECS worker could not cancel flow runs — [#268](https://github.com/PrefectHQ/prefect-aws/pull/268)
+- Improve failure message when creating a Kubernetes job fails — [#71](https://github.com/PrefectHQ/prefect-kubernetes/pull/71)
+
+### Deprecations
+
+- Rename `prefect.infrastructure.docker` to `prefect.infrastructure.container` —
+- Rename `prefect.docker` to `prefect.utilities.dockerutils` —
+
+### Documentation
+
+- Create examples of working with Prefect REST APIs —
+- Add state change hook documentation —
+- Add tip about private repositories in projects documentation —
+- Improve runtime context documentation —
+- Simplify the flow and task configuration documentation —
+- Clarify task retries documentation —
+- Fix typos in cloud documentation —
+- Update automations documentation —
+- Fix typo in tutorial documentation —
+- Add tip on `keys` in artifacts documentation —
+- Expand docstrings for artifacts —
+- Update description of `image` parameter of `DockerContainer` in infrastructure documentation —
+- Lowercase Prefect server where appropriate —
+- Remove `Upgrading from Prefect Beta` section of installation page —
+- Update rate limit documentation to include `/set_state` and `/flows` endpoint for Prefect Cloud —
+- Update documentation links in UI to concepts when possible —
+
+### Contributors
+
+- @BitTheByte
+- @snikch made their first contribution in
+- @rkscodes made their first contribution in
+- @sarahmk125 made their first contribution in
+
+**All changes**:
+
+## Release 2.10.10
+
+### The need for (CLI) speed
+
+We wanted the CLI to be as fast as the rest of Prefect. Through a series of enhancements, we've sped up CLI performance by as much as 4x on some systems!
+
+See the following pull requests for implementation details:
+
+- Delay `apprise` imports —
+- Defer import of `dateparser` —
+- Defer loading of Prefect integrations until necessary —
+- Add `Block.get_block_class_from_key` and replace external uses of `lookup_type` —
+- Load collections before auto-registering block types on the server —
+- Do not restrict deployment build infrastructure types to types known at import time —
+
+### Enhancements
+
+- Handle `SIGTERM` received by workers gracefully —
+- Add ability to view table artifacts with NaN values in the Prefect UI —
+- Update `prefect version` command to avoid creating the database if it does not exist —
+- Allow client retries when the server SQLite database is busy —
+- Allow client retries when general database errors are encountered —
+- Ensure published Docker images have the latest versions of requirements —
+
+### Fixes
+
+- Fix bug where `SIGTERM` was not properly captured as a flow run crash for flow runs created by a deployment —
+- Fix deadlock when logging is overridden from an asynchronous context —
+- Fix orchestration race conditions by adding a lock for updates to flow run state transitions —
+- Fix date range filter on flow runs page —
+- Fix bug where ephemeral server raised exceptions client-side —
+- Fix bug where ARM64 Docker images had a corrupt database —
+
+### Documentation
+
+- Clarify the retry on tasks concept page —
+- Improve the navigation structure and clarity of the API docs —
+- Add `work_pool_name` to `Deployment.build_from_flow` on deployments concept page —
+- Add additional worker types to work pools, workers & agents concept page —
+- Add docstrings for all schema filters —
+
+**All changes**:
+
+## Release 2.10.9
+
+### Worker logs can now be seen on the flow run page
+
+Workers now link relevant logs to specific flow runs, allowing you to view infrastructure-related logs on your flow run page.
+
+Process worker logs
+
+You'll see generic logs from all worker types. Integration worker implementations such as Kubernetes workers will be updated to send additional rich logs to give you insight into the behavior of flow run infrastructure.
+
+See for details.
+
+### Enhancements
+
+- Handle `SIGTERM` received by agent gracefully —
+- Add global default settings for flow and task retries and retry delay seconds —
+- Add support for populating submodules to `git_clone_project` projects step —
+- Add wrapper for exceptions encountered while resolving parameter inputs —
+- Add flush of logs before exiting deployed flow run processes to ensure messages are not lost —
+- Update worker to be able to include itself as a related resource —
+
+### Fixes
+
+- Fix bug where `SIGTERM` was not properly captured as a flow run crash —
+- Fix passing of optional parameters to the API in `client.create_work_queue` —
+
+### Documentation
+
+- Add tip about flow run level concurrency —
+- Add documentation on `on_failure` flow run state change hook —
+- Update tutorials landing page —
+
+### Contributors
+
+- @andrewbrannan made their first contribution in
+- @ddelange
+
+**All changes**:
+
+## Release 2.10.8
+
+### Flow run orchestration rule updates
+
+A flow run orchestration rule that previously prevented backwards transitions has been updated in this release to allow most transitions. It now blocks only certain transitions to `PENDING` states, guarding against race conditions when multiple agents or workers handle the same run. This improves orchestration behavior during infrastructure restarts. For example, when a Kubernetes pod is interrupted, the flow run can be rescheduled on a new pod by Kubernetes. Previously, Prefect would abort the run as it attempted to transition from a `RUNNING` to a `RUNNING` state. Now, Prefect will allow this transition and your flow run will continue.
+
+In summary, the following rules now apply:
+
+- `CANCELLED` -> `PENDING` is not allowed
+- `CANCELLING`/`RUNNING` -> `RUNNING` is allowed
+- `CANCELLING`/`RUNNING`/`PENDING` -> `SCHEDULED` is allowed
+
+See for details.
+
+### Enhancements
+
+- Display message when service back-off is reset to avoid confusion —
+- Improve `QueueService` performance —
+
+### Fixes
+
+- Ensure deployment creation does not require write access when a `.prefectignore` file exists —
+- Fix bug where `deployment apply` command could hang on exit —
+
+### Deprecations
+
+- Add future warning for Python 3.7 EOL —
+
+### Documentation
+
+- Move creating a new worker type tutorial to guides —
+- Fix `name` description in `deployment.yaml` reference —
+
+**All changes**:
+
+## Release 2.10.7
+
+### New and improved Flows page
+
+This release combines the previously separate flows and deployments UI pages into a single, holistic page that brings together flows and deployments, as well as their recent and upcoming runs. You can now see the state of the most recent flow run for each flow and deployment, giving you a snapshot of the status of your workspace. In addition, you can now filter deployments by whether their schedule is active and the work pool to which flow runs are submitted. See for details.
+
+![flows-page](https://user-images.githubusercontent.com/3407835/236275227-04944fde-cdc2-4f44-bcae-eb65f4cafa0d.png)
+
+### `on_crashed` state change hook for flows
+
+This release introduces the new `on_crashed` hook for flows, allowing you to add client-side hooks that will be called when your flow crashes. This is useful for cases where you want to execute code without involving the Prefect API, and for custom handling on `CRASHED` terminal states. This callable hook will receive three arguments: `flow`, `flow_run`, and `state`.
+ +Here is an example of how to use the `on_crashed` hook in your flow: + +```python +from prefect import flow + +def crash_hook(flow, flow_run, state): + print("Don't Panic! But the flow has crashed...") + +@flow(on_crashed=[crash_hook]) +def my_flow(): + # call `crash_hook` if this flow enters a `CRASHED` state + pass + +if __name__ == '__main__': + my_flow() +``` + +Now, if your flow crashes, `crash_hook` will be executed! Notably, you can also call the same hook for a variety of terminal states, or call multiple hooks for the same terminal state. For example: + +```python +@flow(on_crashed=[my_hook], on_failure=[my_hook]) +def my_flow(): + # call the same hook if this flow enters a `FAILED` or `CRASHED` state + pass + +@flow(on_crashed=[my_first_hook, my_second_hook]) +def my_flow(): + # call two different hooks if this flow enters a `CRASHED` state + pass +``` + +See the [pull request](https://github.com/PrefectHQ/prefect/pull/9418) for implementation details. + +### Enhancements + +- Prevent unnecessarily verbose logs by updating `log_prints` to ignore prints where a custom `file` is used — +- Create a process work pool by default when a new worker is started with a new work pool name and no type — +- Add support for asynchronous project steps — +- Update `critical_service_loop` to retry on all 5XX HTTP status codes — +- Add backoff on failure to agent critical loop services — +- Add print statement to `git pull` to isolate issues between clone and execution — +- Add `on_crashed` flow run state change hook — +- Make build->push step explicit in docker project recipes — +- Add storage blocks to cli `deployment build` help description — +- Add `call_in_...` methods to the concurrency API — +- Add support for `Callable[[], T]` to concurrency API methods — +- Add a parameters JSON input option for deployments in the UI — [`#1405`](https://github.com/PrefectHQ/prefect-ui-library/pull/1405) +- Improve consistency in UI help modals — [`#1397`](https://github.com/PrefectHQ/prefect-ui-library/pull/1397) + +### Fixes + +- Add guard against null schedule in `deployment.yaml` — +- Fix issue preventing work pool filter from being applied to the flow runs page — +- Fix project recipe `image_name` and `tag` templating in docker-git, docker-gcs, and docker-s3 — +- Fix bug with work queues showing as unhealthy when a work queue with the same name is unhealthy — +- Fix bug where child flows would not fail the parent when they received invalid arguments — +- Fix schema values mapping on the create flow run forms to ensure all parameter values can be edited — [`#1407`](https://github.com/PrefectHQ/prefect-ui-library/pull/1407) +- Add a check for color scheme to ensure the flow run state favicon is visible — [`#1392`](https://github.com/PrefectHQ/prefect-ui-library/pull/1392) +- Fix deadlock during API log handler flush when logging configuration is overridden — +- Fix send/drain race conditions in queue services — +- Fix bug where missing trailing slash in remote filesystems path would cause download failures — + +### Documentation + +- Add a link to bug bounty program information — +- Add `Additional Resources` Section to Work Pools, Workers, & Agents page — +- Fix mistaken placement of `result_storage` parameter — +- Add concept list to concept section parent page — +- Add Paused and Cancelling states to states concept page — +- Update docs logos — +- Direct _Prefect Integration template_ link to the correct page — +- Update landing page image — + +### New Contributors + +- @rmorshea made their first 
contribution in
+
+**All changes**:
+
+## Release 2.10.6
+
+### Deploy many flows at once with projects
+
+You can now declare multiple deployments for your project in the `deployment.yaml` file. When multiple deployments are declared in a project, you can deploy any number of them at a time by providing their names to the `prefect deploy` command. You can also deploy all of the deployments in a project with the `--all` flag on the `prefect deploy` command.
+
+Deployments that are declared in a project are independent of each other and can be deployed to different work pools, on different schedules, or using different project actions. By default, deployments will use the build, pull, and push actions defined in the project's `prefect.yaml` file, but those actions can be overridden by setting build, pull, or push on a deployment declared in `deployment.yaml`. This enables patterns like different project storage methods and multiple Dockerfiles for a project.
+
+Because the deployments are all declared in a single YAML file, you can also take advantage of YAML anchors and aliases to avoid duplication in your `deployment.yaml` file. This enables declaring custom project actions once and reusing them across different deployments, or using the same schedule for multiple deployments; a short sketch follows below.
+
+To learn more about Projects, check out our [documentation](https://docs.prefect.io/latest/concepts/projects/) and [tutorials](https://docs.prefect.io/latest/tutorials/projects/) to quickly accelerate your flow deployment process!
+
+See for details.
+
+### Improve run restart behavior
+
+Previously, transitions out of terminal states were allowed only in very specific cases:
+
+- A task run could move from a failed/crashed/cancelled state to running if the flow run was retrying
+- A flow run could move to a scheduled (awaiting retry) state
+
+These rules could prevent runs from executing again during manual restarts or worker rescheduling. To improve behavior in these cases, we now allow transitions out of terminal states unless the run is completed _and_ has a persisted result.
+
+For example, these changes enable the following behaviors:
+
+- A task run that fails and is orchestrated again will run instead of aborting
+- A task run that completes but does not persist its result will run again on flow run retry
+- A flow run may be rescheduled without using the "awaiting retry" name
+- A flow run that fails and is orchestrated again will run instead of aborting
+
+See for details.
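+As an illustrative sketch of the multi-deployment `deployment.yaml` described above (all names and schedules here are hypothetical), two deployments can share one schedule through a YAML anchor and alias:
+
+```yaml
+deployments:
+  - name: nightly-etl
+    entrypoint: flows/etl.py:etl
+    schedule: &nightly # define the shared schedule once
+      cron: "0 2 * * *"
+  - name: nightly-report
+    entrypoint: flows/report.py:report
+    schedule: *nightly # reuse it via the alias
+```
+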
+
+### Enhancements
+
+- Add support for recursive flow calls —
+- Add support for concurrent runs of the same flow —
+- Add ability for `flow_run_name` and `task_run_name` settings to accept functions —
+- Add pending items count to service failure exception message —
+- Add `severity` key to JSON-formatted logs for GCP compatibility —
+- Update orchestration rules to allow transitions from terminal states —
+- Enable filtering flows by work pool at the `/flows/filter` endpoint —
+- Add `--tail` option to `prefect flow-run logs` CLI —
+- Enhance UI handling of flow run graph and accompanying selection panel —
+- Enhance UI rendering of schema-generated forms (used for flow run creation, deployment editing, block configuration, notifications, and work pool job templates) and their values —
+- Update icons and Prefect logo —
+- Add results to task run page —
+- Add artifacts to task run page —
+- Show entrypoint and path in deployment details —
+- Enhance clarity of error message by raising `UnfinishedRun` instead of `MissingResult` when state is not final —
+
+### Fixes
+
+- Ensure the Prefect UI displays the actual parameters used to kick off a flow run —
+- Ensure workers only create one client while running —
+- Ensure services are drained on global loop shutdown —
+- Show logs on pending flow run pages —
+- Fix `flow-run logs --limit` —
+- Fix `future.result()` and `future.wait()` calls from async contexts —
+- Update `QueueService.send` to wait for the item to be placed in the queue before returning —
+- Update `resolve_futures_to_data` and `resolve_futures_to_states` to wait for futures in the correct event loop —
+- Fix bug where tasks were not called when debug mode was enabled —
+- Fix bug where boolean values for new flow runs created through the UI were not sent if the value matched the deployment's schema default —
+- Fix race condition in event loop thread start —
+
+### Documentation
+
+- Add tutorial for developing a new worker —
+- Fix social cards to enable previews when linking documentation —
+- Fix rendering of Prefect Server and Cloud feature list —
+- Fix a broken link and clarify language —
+- Update "Event Feed" screenshot —
+
+### New Contributors
+
+- @rsampaths16 made their first contribution in
+- @Shubhamparashar made their first contribution in
+
+**All changes**:
+
+## Release 2.10.5
+
+### Deploy a Prefect flow via GitHub Actions
+
+With the new [Deploy a Prefect flow](https://github.com/marketplace/actions/deploy-a-prefect-flow) GitHub Action, you can automate the build process for deployments orchestrated by Prefect Cloud. The action leverages the new [Projects](https://docs.prefect.io/latest/concepts/projects/) system. See the [action page](https://github.com/marketplace/actions/deploy-a-prefect-flow) for examples and configuration options.
+
+### Cloud Provider Workers
+
+Workers, Prefect's next-generation agents, have dedicated infrastructure types. This week, we are releasing typed workers for each major cloud provider: AWS, GCP, and Azure. You can find them in the [prefect-aws](https://github.com/PrefectHQ/prefect-aws), [prefect-gcp](https://prefecthq.github.io/prefect-gcp/), and [prefect-azure](https://github.com/PrefectHQ/prefect-azure) collections, respectively.
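+As a quick sketch (the pool name here is illustrative), you might install one of these collections and start a typed worker against a matching work pool:
+
+```bash
+# Install the AWS collection, which provides the ECS worker type
+pip install prefect-aws
+
+# Start a worker that polls an ECS-typed work pool
+prefect worker start --pool my-ecs-pool --type ecs
+```
+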
+
+See the following pull requests for implementation details:
+
+-
+-
+-
+-
+
+### Enhancements
+
+- Add `idempotency_key` to flow runs filter — [#8600](https://github.com/PrefectHQ/prefect/pull/8600)
+- Add `details` tab to flow run page and increase flow run graph width — [#9258](https://github.com/PrefectHQ/prefect/pull/9258)
+- Add status code to base client log on retry — [#9265](https://github.com/PrefectHQ/prefect/pull/9265)
+
+### Fixes
+
+- Fix issue in which work queues were duplicated in the `default-agent-pool` when creating a deployment — [#9046](https://github.com/PrefectHQ/prefect/pull/9046)
+- Add `configuration` to `Worker.kill_infrastructure` signature — [#9250](https://github.com/PrefectHQ/prefect/pull/9250)
+- Update `critical_service_loop` to throw a runtime error on failure — [#9267](https://github.com/PrefectHQ/prefect/pull/9267)
+- Fix pip requirement inference compatibility with Python 3.11+ and pip 23.1+ — [#9278](https://github.com/PrefectHQ/prefect/pull/9278)
+- Fix validation error occurring on default values in `variables` schema of `WorkPool.base_job_template` — [#9282](https://github.com/PrefectHQ/prefect/pull/9282)
+
+### Experimental
+
+- Add `worker.executed-flow-run` event — [#9227](https://github.com/PrefectHQ/prefect/pull/9227)
+- Emit events for worker lifecycle — [#9249](https://github.com/PrefectHQ/prefect/pull/9249)
+- Emit `cancelled-flow-run` event when worker cancels a flow run — [#9255](https://github.com/PrefectHQ/prefect/pull/9255)
+
+### Documentation
+
+- Fix broken link on docs landing page — [#9247](https://github.com/PrefectHQ/prefect/pull/9247)
+- Remove outdated warning from task run concurrency UI docs — [#9256](https://github.com/PrefectHQ/prefect/pull/9256)
+- Add `edit` button to docs to improve ability to fix documentation — [#9259](https://github.com/PrefectHQ/prefect/pull/9259)
+- Remove UI documentation pages, reorganize content, and simplify side bar navigation structure — [#9039](https://github.com/PrefectHQ/prefect/pull/9039)
+- Add tutorial for creating a worker — [#9179](https://github.com/PrefectHQ/prefect/pull/9179)
+- Add GitHub Action to trigger versioned builds in docs repository — [#8984](https://github.com/PrefectHQ/prefect/pull/8984)
+
+**All changes**:
+
 ## Release 2.10.4
 
 This release further refines Prefect 2.10 with enhancements for [project deployments](https://docs.prefect.io/latest/concepts/projects/#the-deployment-yaml-file) and [workers](https://docs.prefect.io/latest/concepts/work-pools/#worker-overview), fixes for flow run cancellation and the worker CLI, and more.
 
 ### More flexible project deployments
+
 Prior to this release, removing keys from a project's `deployment.yaml` caused an error. Thanks to the changes in [#9190](https://github.com/PrefectHQ/prefect/pull/9190), Prefect now uses default values for any required keys missing from your project's configuration.
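+
+As an illustrative sketch (the names are hypothetical, and this release's `deployment.yaml` is the flat, single-deployment format), a file as minimal as this is now valid, with omitted keys falling back to their defaults:
+
+```yaml
+name: my-deployment
+entrypoint: flows/etl.py:etl
+```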
### Enhancements + - Allow partial `deployment.yaml` files for projects by using defaults for missing values — [#9190](https://github.com/PrefectHQ/prefect/pull/9190) -- Add flow run cancellation support for workers - [#9198](https://github.com/PrefectHQ/prefect/pull/9198) +- Add flow run cancellation support for workers — [#9198](https://github.com/PrefectHQ/prefect/pull/9198) ### Fixes -- Prevent scheduled flow runs from getting stuck in `CANCELLING` state — [#8414](https://github.com/PrefectHQ/prefect/pull/8414) + +- Prevent scheduled flow runs from getting stuck in `CANCELLING` state — [#8414](https://github.com/PrefectHQ/prefect/pull/8414) - Fix `work_queues` and `worker_type` arguments for the `prefect worker start` CLI command — [#9154](https://github.com/PrefectHQ/prefect/pull/9154) - Fix overflow in flow run logger UI [`#1342`](https://github.com/PrefectHQ/prefect-ui-library/pull/1342) - Fix schema form handling of reference objects [`#1332`](https://github.com/PrefectHQ/prefect-ui-library/pull/1332) - Improve flow graph UX by suppressing shortcuts when a metakey is active [`#1333`](https://github.com/PrefectHQ/prefect-ui-library/pull/1333) ### Experimental + - Emit an event when a worker submits a flow run for execution — [#9203](https://github.com/PrefectHQ/prefect/pull/9203) ### Documentation + - Fix a broken link by removing an obsolete redirect — [#9189](https://github.com/PrefectHQ/prefect/pull/9189) - Add polling interval information to worker and agent documentation — [#9209](https://github.com/PrefectHQ/prefect/pull/9209) - Update documentation badge styling to improve docs usability — [#9207](https://github.com/PrefectHQ/prefect/pull/9207) +**All changes**: ## Release 2.10.3 -This release builds on 2.10 to further improve the experience of setting up and deploying from [a prefect project](https://docs.prefect.io/latest/tutorials/projects/). In particular, initializing with a recipe now initializes an interactive CLI experience that guides you to a correct setup. This experience can be avoided for programmatic initialization by providing all required fields for the recipe via CLI. For more information, see [the project documentation](https://docs.prefect.io/latest/concepts/projects/). We will continue to enhance the deployment experience as we receive feedback, so please keep it coming! +This release builds on 2.10 to further improve the experience of setting up and deploying from [a prefect project](https://docs.prefect.io/latest/tutorials/projects/). In particular, initializing with a recipe now initializes an interactive CLI experience that guides you to a correct setup. This experience can be avoided for programmatic initialization by providing all required fields for the recipe via CLI. For more information, see [the project documentation](https://docs.prefect.io/latest/concepts/projects/). We will continue to enhance the deployment experience as we receive feedback, so please keep it coming! This release also includes [a critical fix](https://github.com/PrefectHQ/prefect/pull/9180) for Prefect logs that were sometimes delayed in being sent to the API. 
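+
+For example, a guided setup for one of the available recipes can be started like this (a sketch; it assumes the `docker-git` recipe name and the `prefect project init` command of this release):
+
+```bash
+# Initialize a project from a recipe; omitting required fields
+# drops you into the interactive prompts described above.
+prefect project init --recipe docker-git
+```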
-
 ### Enhancements
-- Rename `__root_path__` to `__development_base_path__` — https://github.com/PrefectHQ/prefect/pull/9136
-- Include flow run and flow as related resources when emitting events via the events worker — https://github.com/PrefectHQ/prefect/pull/9129
-- Cloud storage recipe improvements — https://github.com/PrefectHQ/prefect/pull/9145
-- Use new sessions and transactions for each query during `CancellationCleanup` — https://github.com/PrefectHQ/prefect/pull/9124
-- Stream `git` output during `git_clone_project` — https://github.com/PrefectHQ/prefect/pull/9149
-- Update deployment defaults with project init — https://github.com/PrefectHQ/prefect/pull/9146
-- Mock runtime via environment variable — https://github.com/PrefectHQ/prefect/pull/9156
-- Wire up scheduling kwargs to deploy CLI — https://github.com/PrefectHQ/prefect/pull/9176
-- Add deployment and flow filters to `/artifacts/filter` and `/artifacts/latest/filter` routes — https://github.com/PrefectHQ/prefect/pull/9089
-- Add `/artifacts/latest/count` route — https://github.com/PrefectHQ/prefect/pull/9090
-- add flow run to task run logging — https://github.com/PrefectHQ/prefect/pull/9170
-- Add pragma statements automatically if sqlite — https://github.com/PrefectHQ/prefect/pull/9169
-- Improved recipe initialization UX — https://github.com/PrefectHQ/prefect/pull/9158
+
+- Rename `prefect.__root_path__` to `prefect.__development_base_path__` —
+- Include flow run and flow as related resources when emitting events via the events worker —
+- Improve Cloud storage Projects recipes —
+- Use new sessions and transactions for each query during `CancellationCleanup` —
+- Stream `git` output during `git_clone_project` —
+- Update deployment defaults with project init —
+- Add ability to mock `prefect.runtime` attributes via environment variable —
+- Add scheduling options to deploy CLI —
+- Add deployment and flow filters to `/artifacts/filter` and `/artifacts/latest/filter` routes —
+- Add `/artifacts/latest/count` route —
+- Add flow run metadata to task run logger —
+- Add pragma statements automatically when writing database migrations for SQLite —
+- Improve Projects `recipe` initialization UX —
 
 ### Fixes
-- Update `prefect deploy` to pull `flow_name` and `entrypoint` from deployment.yaml if specified — https://github.com/PrefectHQ/prefect/pull/9157
-- Fix bug where non-zero status codes would be reported when deployed flow runs paused or failed — https://github.com/PrefectHQ/prefect/pull/9175
-- Hide command when access token is provided and `git_clone_project` fails — https://github.com/PrefectHQ/prefect/pull/9150
-- Fix bug where log worker only sent logs to API on flush rather than on an interval — https://github.com/PrefectHQ/prefect/pull/9180
-- Fix apply artifact collection filter — https://github.com/PrefectHQ/prefect/pull/9153
+
+- Update `prefect deploy` to pull `flow_name` and `entrypoint` from deployment.yaml if specified —
+- Fix bug where non-zero status codes would be reported when deployed flow runs paused or failed —
+- Hide command when access token is provided and `git_clone_project` fails —
+- Fix bug where log worker only sent logs to API on flush rather than on an interval —
+- Fix apply artifact collection filter —
 
 ### Documentation
-- Adds artifacts to api-ref — https://github.com/PrefectHQ/prefect/pull/9143
-- Expands upon project step documentation — https://github.com/PrefectHQ/prefect/pull/9151
-- small project doc fixes — https://github.com/PrefectHQ/prefect/pull/9161
+
+- Add artifacts to API reference —
+- Expand upon Projects `steps` documentation —
 
 ### Collections
-- add prefect-spark-on-k8s-operator to integrations catalog list. — https://github.com/PrefectHQ/prefect/pull/9029
+
+- Add `prefect-spark-on-k8s-operator` to integrations catalog list —
 
 ### Contributors
-* @tardunge made their first contribution in https://github.com/PrefectHQ/prefect/pull/9029
-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.10.2...2.10.3
+- @tardunge made their first contribution in
+
+**All changes**:
 
 ## Release 2.10.2
 
-Fixes a bug where deployments were not downloaded from remote storage blocks during flow runs — https://github.com/PrefectHQ/prefect/pull/9138
+Fixes a bug where deployments were not downloaded from remote storage blocks during flow runs —
 
 ### Enhancements
-- Add httpx.ConnectTimeout to the list of retry exceptions in base client — https://github.com/PrefectHQ/prefect/pull/9125
+
+- Add `httpx.ConnectTimeout` to the list of retry exceptions in base client —
 
 ### Contributors
-* @sorendaugaard made their first contribution in https://github.com/PrefectHQ/prefect/pull/9125
+- @sorendaugaard made their first contribution in
 
-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.10.1...2.10.2
+**All changes**:
 
 ## Release 2.10.1
 
@@ -89,11 +4872,11 @@ Fixes a bug with accessing project recipes through the CLI. See the [pull reques
 
 ## Release 2.10.0
 
-Prefect deployments often have critical, implicit dependencies on files and build artifacts, such as containers, that are created and stored outside of Prefect. Each of these dependencies is a potential stumbling block when deploying a flow - you need to ensure that they're satisfied for your flow to run successfully. In this release, we're introducing two new beta features, workers and projects, to help you better manage your flow deployment process. Additionally, we're releasing variables for centralized management of management and expanding events and automations to include blocks. There are a lot of highlighted features this week — but we've also made some significant performance improvements alongside a slew of bug fixes and enhancements!
+Prefect deployments often have critical, implicit dependencies on files and build artifacts, such as containers, that are created and stored outside of Prefect. Each of these dependencies is a potential stumbling block when deploying a flow — you need to ensure that they're satisfied for your flow to run successfully. In this release, we're introducing two new beta features, workers and projects, to help you better manage your flow deployment process. Additionally, we're releasing variables for centralized management of configuration and expanding events and automations to include blocks. There are a lot of highlighted features this week — but we've also made some significant performance improvements alongside a slew of bug fixes and enhancements!
 
 ### Workers [Beta]
 
-Workers are next-generation agents, designed from the ground up to interact with [work pools](https://docs.prefect.io/concepts/work-pools/). Each worker manages flow run infrastructure of a specific type and must pull from a work pool with a matching type. Existing work pools are all "agent" typed for backwards compatibility with our agents — but new work pools can be assigned a specific infrastructure type. Specifying a type for a work pool simplifies choosing what kind of infrastructure will be used when creating a flow run.
+Workers are next-generation agents, designed from the ground up to interact with [work pools](https://docs.prefect.io/concepts/work-pools/). Each worker manages flow run infrastructure of a specific type and must pull from a work pool with a matching type. Existing work pools are all "agent" typed for backwards compatibility with our agents — but new work pools can be assigned a specific infrastructure type. Specifying a type for a work pool simplifies choosing what kind of infrastructure will be used when creating a flow run. Work pools expose rich configuration of their infrastructure. Every work pool type has a base configuration with sensible defaults such that you can begin executing work with just a single command. The infrastructure configuration is fully customizable from the Prefect UI. For example, you can now customize the entire payload used to run flows on Kubernetes — you are not limited to the fields Prefect exposes in its SDK. We provide templating to inject runtime information and common settings into infrastructure creation payloads. Advanced users can add _custom_ template variables which are then exposed the same as Prefect's default options in an easy to use UI. @@ -123,17 +4906,17 @@ Projects are a contract between you and a worker, specifying what you do when yo See the new [project concept doc](https://docs.prefect.io/latest/concepts/projects/) for more information or the following pull requests for implementation details: -- https://github.com/PrefectHQ/prefect/pull/8930 -- https://github.com/PrefectHQ/prefect/pull/9103 -- https://github.com/PrefectHQ/prefect/pull/9105 -- https://github.com/PrefectHQ/prefect/pull/9112 -- https://github.com/PrefectHQ/prefect/pull/9093 -- https://github.com/PrefectHQ/prefect/pull/9083 -- https://github.com/PrefectHQ/prefect/pull/9041 +- +- +- +- +- +- +- ### Variables -Variables enable you to store and reuse non-sensitive bits of data, such as configuration information. Variables are named, mutable string values, much like environment variables. They are scoped to a Prefect Server instance or a single workspace in Prefect Cloud. Variables can be created or modified at any time. While variable values are most commonly loaded during flow runtime, they can be loaded in other contexts, at any time, such that they can be used to pass configuration information to Prefect configuration files, such as project steps. You can access any variable via the Python SDK via the `.get()` method. +Variables enable you to store and reuse non-sensitive bits of data, such as configuration information. Variables are named, mutable string values, much like environment variables. They are scoped to a Prefect Server instance or a single workspace in Prefect Cloud. Variables can be created or modified at any time. While variable values are most commonly loaded during flow runtime, they can be loaded in other contexts, at any time, such that they can be used to pass configuration information to Prefect configuration files, such as project steps. You can access any variable via the Python SDK via the `.get()` method. ```python from prefect import variables @@ -171,62 +4954,66 @@ Continuing the rollout of events[https://docs.prefect.io/concepts/events-and-res We're releasing a lot of new features every week and we know not everyone is on the latest version of Prefect. We've added versioning to our documentation website to make it easier to find the docs for the version of Prefect that you're using. 
-Now, when you visit the Prefect documentation site, you'll see a version selector at the top of the page. +Now, when you visit the Prefect documentation site, you'll see a version selector at the top of the page. ![versioned docs](https://user-images.githubusercontent.com/228762/230432235-26fc9406-1390-4c63-9956-b8cdabdfba6f.png) - ### Breaking Changes -- Unused options for sorting logs have been removed from the API — https://github.com/PrefectHQ/prefect/pull/7873 - -### Enhancements -- Add artifacts view to flow run page — https://github.com/PrefectHQ/prefect/pull/9109 -- Improve performance of the background event worker — https://github.com/PrefectHQ/prefect/pull/9019 -- Update deployment flow run creation to default to a SCHEDULED state instead of PENDING — https://github.com/PrefectHQ/prefect/pull/9049 -- Add `PREFECT_CLIENT_RETRY_EXTRA_CODES` to allow retry on additional HTTP status codes — https://github.com/PrefectHQ/prefect/pull/9056 -- Improve performance of the background log worker — https://github.com/PrefectHQ/prefect/pull/9048 -- Update agent cancellation check interval to double the scheduled check interval — https://github.com/PrefectHQ/prefect/pull/9084 -- Update default agent query interval from 10s to 15s — https://github.com/PrefectHQ/prefect/pull/9085 -- Add a 10 minute cache to API healthchecks — https://github.com/PrefectHQ/prefect/pull/9069 -- Improve performance of concurrent task runner — https://github.com/PrefectHQ/prefect/pull/9073 -- Improve performance of waiting for task submission — https://github.com/PrefectHQ/prefect/pull/9072 -- Add retry on 502 BAD GATEWAY to client — https://github.com/PrefectHQ/prefect/pull/9102 -- Update local and remote file systems to return path on write — https://github.com/PrefectHQ/prefect/pull/8965 -- Add artifacts `/count` route — https://github.com/PrefectHQ/prefect/pull/9022 -- Improve performance of automatic block registration — https://github.com/PrefectHQ/prefect/pull/8838 -- Improve performance of log retrieval queries — https://github.com/PrefectHQ/prefect/pull/9035 -- Improve performance of artifact retrieval — https://github.com/PrefectHQ/prefect/pull/9061 / https://github.com/PrefectHQ/prefect/pull/9064 -- Add `--type` option to create work-pool CLI — https://github.com/PrefectHQ/prefect/pull/8993 -- Improve flow run timeline performance - https://github.com/PrefectHQ/prefect-ui-library/pull/1315 -- Add flow names to sub flows on the flow run timeline graph - https://github.com/PrefectHQ/prefect-ui-library/pull/1304 - -### Fixes -- Fix bug where iterable defaults were treated as mapped parameters — https://github.com/PrefectHQ/prefect/pull/9021 -- Fix sequential execution with mapped tasks using the SequentialTaskRunner — https://github.com/PrefectHQ/prefect/pull/8473 -- Fix race condition where futures did not wait for submission to complete — https://github.com/PrefectHQ/prefect/pull/9070 -- Fix detection of iterables within `quote` annotations while mapping — https://github.com/PrefectHQ/prefect/pull/9095 -- Fix Dockerfile copy of UI package files on latest Docker version — https://github.com/PrefectHQ/prefect/pull/9077 - -### Documentation -- Add copy to clipboard button in documentation code blocks — https://github.com/PrefectHQ/prefect/pull/9026 -- Fixed styling of deployments mermaid diagram — https://github.com/PrefectHQ/prefect/pull/9017 -- Add documentation for database migrations — https://github.com/PrefectHQ/prefect/pull/9044 -- Adds documentation for BitBucket to flow code storage types — 
https://github.com/PrefectHQ/prefect/pull/9080 -- Update rate limit documentation for Cloud — https://github.com/PrefectHQ/prefect/pull/9100 - -### Contributors -- @mianos made their first contribution in https://github.com/PrefectHQ/prefect/pull/9077 -- @dominictarro made their first contribution in https://github.com/PrefectHQ/prefect/pull/8965 + +- Unused options for sorting logs have been removed from the API — + +### Enhancements + +- Add artifacts view to flow run page — +- Improve performance of the background event worker — +- Update deployment flow run creation to default to a SCHEDULED state instead of PENDING — +- Add `PREFECT_CLIENT_RETRY_EXTRA_CODES` to allow retry on additional HTTP status codes — +- Improve performance of the background log worker — +- Update agent cancellation check interval to double the scheduled check interval — +- Update default agent query interval from 10s to 15s — +- Add a 10 minute cache to API healthchecks — +- Improve performance of concurrent task runner — +- Improve performance of waiting for task submission — +- Add retry on 502 BAD GATEWAY to client — +- Update local and remote file systems to return path on write — +- Add artifacts `/count` route — +- Improve performance of automatic block registration — +- Improve performance of log retrieval queries — +- Improve performance of artifact retrieval — / +- Add `--type` option to create work-pool CLI — +- Improve flow run timeline performance — +- Add flow names to sub flows on the flow run timeline graph — + +### Fixes + +- Fix bug where iterable defaults were treated as mapped parameters — +- Fix sequential execution with mapped tasks using the SequentialTaskRunner — +- Fix race condition where futures did not wait for submission to complete — +- Fix detection of iterables within `quote` annotations while mapping — +- Fix Dockerfile copy of UI package files on latest Docker version — + +### Documentation + +- Add copy to clipboard button in documentation code blocks — +- Fixed styling of deployments mermaid diagram — +- Add documentation for database migrations — +- Adds documentation for BitBucket to flow code storage types — +- Update rate limit documentation for Cloud — + +### Contributors + +- @mianos made their first contribution in +- @dominictarro made their first contribution in - @joelluijmes - @john-jam -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.9.0...2.10.0 +**All changes**: ## Release 2.9.0 ### Track and manage artifacts -Most workflows produce or update an artifact of some kind, whether it's a table, a file, or a model. With Prefect Artifacts, you can track changes to these outputs and richly display them in the UI as tables, markdown, and links. Artifacts may be associated with a particular task run, flow run, or even exist outside a flow run context, enabling you to not only observe your flows, but the objects that they interact with as well. +Most workflows produce or update an artifact of some kind, whether it's a table, a file, or a model. With Prefect Artifacts, you can track changes to these outputs and richly display them in the UI as tables, markdown, and links. Artifacts may be associated with a particular task run, flow run, or even exist outside a flow run context, enabling you to not only observe your flows, but the objects that they interact with as well. 
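+
+As a minimal sketch (the key and contents here are illustrative), a markdown artifact can be created from inside a flow with the helpers in `prefect.artifacts`:
+
+```python
+from prefect import flow
+from prefect.artifacts import create_markdown_artifact
+
+
+@flow
+def weekly_report():
+    # Keys group artifact versions together; keys must be lowercase
+    # and dash-separated.
+    create_markdown_artifact(
+        key="weekly-report",
+        markdown="# Weekly report\n\nAll pipelines are green.",
+    )
+
+
+if __name__ == "__main__":
+    weekly_report()
+```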
![Artifacts top-level view](https://user-images.githubusercontent.com/27291717/228905742-0bad7874-6b6b-4000-9111-1c4d0e0bd6e1.png) @@ -270,16 +5057,17 @@ You can view your artifacts in the Artifacts page of the Prefect UI, easily sear ![Table artifact in a timeline view](https://user-images.githubusercontent.com/27291717/228905740-bd297de9-6381-45ec-aba3-8b72def70a08.png) See [the documentation](https://docs.prefect.io/concepts/artifacts) for more information, as well as the following pull requests for implementation details: -- https://github.com/PrefectHQ/prefect/pull/9003 -- https://github.com/PrefectHQ/prefect/pull/8832 -- https://github.com/PrefectHQ/prefect/pull/8932 -- https://github.com/PrefectHQ/prefect/pull/8875 -- https://github.com/PrefectHQ/prefect/pull/8874 -- https://github.com/PrefectHQ/prefect/pull/8985 + +- +- +- +- +- +- ### Configure result storage keys -When persisting results, Prefect stores data at a unique, randomly-generated path. While this is convenient for ensuring the result is never overwritten, it limits organization of result files. In this release, we've added configuration of result storage keys, which gives you control over the result file path. Result storage keys can be dynamically formatted with access to all of the modules in `prefect.runtime` and the run's `parameters`. +When persisting results, Prefect stores data at a unique, randomly-generated path. While this is convenient for ensuring the result is never overwritten, it limits organization of result files. In this release, we've added configuration of result storage keys, which gives you control over the result file path. Result storage keys can be dynamically formatted with access to all of the modules in `prefect.runtime` and the run's `parameters`. For example, you can name each result to correspond to the flow run that produced it and a parameter it received: @@ -303,14 +5091,15 @@ my_flow() ``` Which will persist three result files in the storage directory: + ``` -$ ls ~/.prefect/storage | grep "hello__" +$ ls ~/.prefect/storage | grep "hello__" hello__rousing-mushroom__bar.json hello__rousing-mushroom__foo.json hello__rousing-mushroom__world.json ``` -See [the documentation](https://docs.prefect.io/concepts/results/#result-storage-key) for more information. +See [the documentation](https://docs.prefect.io/concepts/results/#result-storage-key) for more information. ### Expanded `prefect.runtime` @@ -328,43 +5117,49 @@ The `prefect.runtime` module is now the preferred way to access information abou See [the documentation](https://docs.prefect.io/concepts/runtime-context/) for more information. 
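+
+As a hedged sketch of the module's shape (attribute values fall back to empty defaults outside a real run context):
+
+```python
+from prefect import flow
+from prefect.runtime import deployment, flow_run
+
+
+@flow
+def introspect():
+    # Both values are empty when the flow is run without a deployment.
+    print("flow run name:", flow_run.name)
+    print("deployment name:", deployment.name)
+
+
+if __name__ == "__main__":
+    introspect()
+```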
See the following pull requests for implementation details: -- https://github.com/PrefectHQ/prefect/pull/8947 -- https://github.com/PrefectHQ/prefect/pull/8948 -- https://github.com/PrefectHQ/prefect/pull/8949 -- https://github.com/PrefectHQ/prefect/pull/8951 -- https://github.com/PrefectHQ/prefect/pull/8954 -- https://github.com/PrefectHQ/prefect/pull/8956 - -### Enhancements -- Add unique integers to worker thread names for inspection - https://github.com/PrefectHQ/prefect/pull/8908 -- Add support to `JSONSerializer` for serialization of exceptions so they are persisted even on failure - https://github.com/PrefectHQ/prefect/pull/8922 -- Add Gzip middleware to the UI and API FastAPI apps for compressing responses - https://github.com/PrefectHQ/prefect/pull/8931 -- Update the runtime to detect flow run information from task run contexts — https://github.com/PrefectHQ/prefect/pull/8951 - -### Fixes -- Fix imports in copytree backport for Python 3.7 - https://github.com/PrefectHQ/prefect/pull/8925 -- Retry on sqlite operational errors - https://github.com/PrefectHQ/prefect/pull/8950 -- Add 30 second timeout to shutdown of the log worker thread — https://github.com/PrefectHQ/prefect/pull/8983 - -### Documentation -- Disambiguate reference to "Blocks" - https://github.com/PrefectHQ/prefect/pull/8921 -- Fix broken concepts link - https://github.com/PrefectHQ/prefect/pull/8923 -- Add note about fine-grained PAT format - https://github.com/PrefectHQ/prefect/pull/8929 -- Add `UnpersistedResult` type - https://github.com/PrefectHQ/prefect/pull/8953 -- Update docs CSS and config for versioning compatibility - https://github.com/PrefectHQ/prefect/pull/8957 -- Clarify Filesystem package dependencies - https://github.com/PrefectHQ/prefect/pull/8989 -- Update flow runs documentation - https://github.com/PrefectHQ/prefect/pull/8919 -- Fix missing backticks on Work Pools concept page - https://github.com/PrefectHQ/prefect/pull/8942 -- Update links to the release notes in the installation guide - https://github.com/PrefectHQ/prefect/pull/8974 -- Fix `EXTRA_PIP_PACKAGES` info in Docker guide — https://github.com/PrefectHQ/prefect/pull/8995 -- Fix `KubernetesJob.job_watch_timeout_seconds` docstring — https://github.com/PrefectHQ/prefect/pull/8977 -- Add task run runtime to API reference — https://github.com/PrefectHQ/prefect/pull/8998 -- Add documentation for runtime context — https://github.com/PrefectHQ/prefect/pull/8999 - -### Contributors -- @andreadistefano made their first contribution in https://github.com/PrefectHQ/prefect/pull/8942 -- @knl made their first contribution in https://github.com/PrefectHQ/prefect/pull/8974 -- @thomas-te made their first contribution in https://github.com/PrefectHQ/prefect/pull/8959 + +- +- +- +- +- +- + +### Enhancements + +- Add unique integers to worker thread names for inspection — +- Add support to `JSONSerializer` for serialization of exceptions so they are persisted even on failure — +- Add Gzip middleware to the UI and API FastAPI apps for compressing responses — +- Update the runtime to detect flow run information from task run contexts — + +### Fixes + +- Fix imports in copytree backport for Python 3.7 — +- Retry on sqlite operational errors — +- Add 30 second timeout to shutdown of the log worker thread — + +### Documentation + +- Disambiguate reference to "Blocks" — +- Fix broken concepts link — +- Add note about fine-grained PAT format — +- Add `UnpersistedResult` type — +- Update docs CSS and config for versioning compatibility — +- Clarify Filesystem 
package dependencies — +- Update flow runs documentation — +- Fix missing backticks on Work Pools concept page — +- Update links to the release notes in the installation guide — +- Fix `EXTRA_PIP_PACKAGES` info in Docker guide — +- Fix `KubernetesJob.job_watch_timeout_seconds` docstring — +- Add task run runtime to API reference — +- Add documentation for runtime context — + +### Contributors + +- @andreadistefano made their first contribution in +- @knl made their first contribution in +- @thomas-te made their first contribution in + ## Release 2.8.7 If you have been watching the experimental section of our release notes, you may have noticed a lot of work around concurrency tooling, flow run graph enhancements, and result artifacts. With this release, these experiments have culminated into exciting features! @@ -377,10 +5172,10 @@ The behavioral changes include: - All orchestration of flows and tasks happens in a dedicated worker thread - Synchronous flows are run on the main thread instead of worker threads - - Solves problems where flow code must be in the main thread e.g. https://github.com/PrefectHQ/prefect/issues/5991 + — Solves problems where flow code must be in the main thread e.g. - Asynchronous flows no longer share an event loop with the Prefect engine - Flow timeouts are now enforced with signals - - Allows interrupt of long-running system calls like `sleep` for more effective timeout enforcement + — Allows interrupt of long-running system calls like `sleep` for more effective timeout enforcement - Asynchronous flows can be called from sync flows - Asynchronous tasks can be used as upstream dependencies for sync tasks in async flows - Synchronous tasks can be submitted from asynchronous flows @@ -388,21 +5183,21 @@ The behavioral changes include: - Flows with thousands of synchronous tasks are less likely to crash - Debug mode now enables verbose logging from Prefect concurrency internals - The API limits itself to 100 concurrent requests when using SQLite as a backend - - Avoids database file contention when using high levels of concurrency + — Avoids database file contention when using high levels of concurrency - Resolving task inputs no longer uses worker threads - - Resolves issues where large numbers of upstream task inputs would cause deadlocks - - Instead of using worker threads, we wait for upstream tasks on the event loop to support high levels of concurrency + — Resolves issues where large numbers of upstream task inputs would cause deadlocks + — Instead of using worker threads, we wait for upstream tasks on the event loop to support high levels of concurrency See the following pull requests for implementation details: -- https://github.com/PrefectHQ/prefect/pull/8702 -- https://github.com/PrefectHQ/prefect/pull/8887 -- https://github.com/PrefectHQ/prefect/pull/8903 -- https://github.com/PrefectHQ/prefect/pull/8830 +- +- +- +- ### Results tab on flow run pages -The Prefect UI now renders information about your flow run and task run results! +The Prefect UI now renders information about your flow run and task run results! This view provides a visual representation of the output of your tasks and flows and, when possible, provides links to results persisted using any of our storage blocks. 
To see this in your UI, run any flow and navigate to the run page; from there you'll see a new tab, "Results": @@ -410,11 +5205,12 @@ This view provides a visual representation of the output of your tasks and flows ![Results grid view](https://user-images.githubusercontent.com/27291717/227274578-35673508-09e2-4b83-bc22-11538f813eea.png) See the following pull requests for implementation details: -- https://github.com/PrefectHQ/prefect-ui-library/pull/1207 -- https://github.com/PrefectHQ/prefect-ui-library/pull/1213 -- https://github.com/PrefectHQ/prefect-ui-library/pull/1223 -- https://github.com/PrefectHQ/prefect/pull/8904 -- https://github.com/PrefectHQ/prefect/pull/8759 + +- +- +- +- +- ### Flow run graph @@ -424,42 +5220,51 @@ We heard that people loved the simplicity and sleekness of the timeline on the f Subflow run expansion ### Enhancements -- Add `--reverse` option to the flow run logs CLI to view logs in descending order — https://github.com/PrefectHQ/prefect/pull/8625 -- Show all flow runs for deployments rather than just the last 7 days — https://github.com/PrefectHQ/prefect/pull/8837 -- Add jitter to Prefect client request retries — https://github.com/PrefectHQ/prefect/pull/8839 -- Add `deployment.name` and `deployment.version` to `prefect.runtime` — https://github.com/PrefectHQ/prefect/pull/8864 -- Add `flow_run.scheduled_start_time` to `prefect.runtime` — https://github.com/PrefectHQ/prefect/pull/8864 -- Adjust SQLite sync mode for improved performance — https://github.com/PrefectHQ/prefect/pull/8071 -- Add debug level log of active profile on module import — https://github.com/PrefectHQ/prefect/pull/8856 -- Update server to use new FastAPI lifespan context manager — https://github.com/PrefectHQ/prefect/pull/8842 -- Add support for variadic keyword arguments to `Task.map` — https://github.com/PrefectHQ/prefect/pull/8188 -- Show the full run history in the UI — https://github.com/PrefectHQ/prefect/pull/8885 + +- Add `--reverse` option to the flow run logs CLI to view logs in descending order — +- Show all flow runs for deployments rather than just the last 7 days — +- Add jitter to Prefect client request retries — +- Add `deployment.name` and `deployment.version` to `prefect.runtime` — +- Add `flow_run.scheduled_start_time` to `prefect.runtime` — +- Adjust SQLite sync mode for improved performance — +- Add debug level log of active profile on module import — +- Update server to use new FastAPI lifespan context manager — +- Add support for variadic keyword arguments to `Task.map` — +- Show the full run history in the UI — ### Fixes -- Fix `prefect dev start` failure — https://github.com/PrefectHQ/prefect/pull/8850 -- Fix bug where `propose_state` could exceed recursion limits during extended waits — https://github.com/PrefectHQ/prefect/pull/8827 -- Fix configuration of flow run infrastructure when using agent default — https://github.com/PrefectHQ/prefect/pull/8872 -- Fix saving block document secrets that have not been modified — https://github.com/PrefectHQ/prefect/pull/8848 -- Disable SLSA provenance setting in Docker buildx to resolve image pull errors with certain Cloud providers — https://github.com/PrefectHQ/prefect/pull/8889 -- Fix race condition in worker thread start — https://github.com/PrefectHQ/prefect/pull/8886 -- The state message has been returned to the flow run metadata panel on the right side of the flow run page - https://github.com/PrefectHQ/prefect/pull/8885 + +- Fix `prefect dev start` failure — +- Fix bug where `propose_state` could exceed recursion 
limits during extended waits — +- Fix configuration of flow run infrastructure when using agent default — +- Fix saving block document secrets that have not been modified — +- Disable SLSA provenance setting in Docker buildx to resolve image pull errors with certain Cloud providers — +- Fix race condition in worker thread start — +- The state message has been returned to the flow run metadata panel on the right side of the flow run page — ### Experimental -- Update to worker base job template logic for nested placeholders — https://github.com/PrefectHQ/prefect/pull/8795 -- Require lowercase artifact `key` field — https://github.com/PrefectHQ/prefect/pull/8860 -- Create `emit_event` helper that takes args for an `Event` and emits it via a worker — https://github.com/PrefectHQ/prefect/pull/8867 -- Allow multiple artifacts to have the same key — https://github.com/PrefectHQ/prefect/pull/8855 -- Add common values to job configuration prior to flow run submission — https://github.com/PrefectHQ/prefect/pull/8826 + +- Update to worker base job template logic for nested placeholders — +- Require lowercase artifact `key` field — +- Create `emit_event` helper that takes args for an `Event` and emits it via a worker — +- Allow multiple artifacts to have the same key — +- Add common values to job configuration prior to flow run submission — ### Deprecations -- Creating data documents will now throw deprecation warnings — https://github.com/PrefectHQ/prefect/pull/8760 + +- Creating data documents will now throw deprecation warnings — ### Documentation -- Add documentation for events and resources — https://github.com/PrefectHQ/prefect/pull/8858 + +- Add documentation for events and resources — ### Contributors -* @lounis89 made their first contribution in https://github.com/PrefectHQ/prefect/pull/8625 -* @mesejo made their first contribution in https://github.com/PrefectHQ/prefect/pull/8842 + +- @lounis89 made their first contribution in + +- @mesejo made their first contribution in + +**All changes**: ## Release 2.8.6 @@ -479,116 +5284,137 @@ def my_flow(): This will create a Dask client whose name mirrors the flow run ID. Similarly, you can use `prefect.runtime` to access parameters that were passed to this deployment run via `prefect.runtime.deployment.parameters`. Note that all of these attributes will be empty if they are not available. -See https://github.com/PrefectHQ/prefect/pull/8790 for details. +See for details. 
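+
+For instance, a sketch of reading a deployment parameter with a local fallback (the parameter name is illustrative):
+
+```python
+from prefect import flow
+from prefect.runtime import deployment
+
+
+@flow
+def my_flow():
+    # `deployment.parameters` is an empty dict when the run was not
+    # created from a deployment, so provide a sensible default.
+    user = deployment.parameters.get("user", "local-dev")
+    print(f"Running as {user}")
+```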
### Enhancements -- Add deployment id support to `run_deployment` — https://github.com/PrefectHQ/prefect/pull/7958 -- Disable Postgres JIT for performance improvements — https://github.com/PrefectHQ/prefect/pull/8804 + +- Add deployment id support to `run_deployment` — +- Disable Postgres JIT for performance improvements — ### Fixes -- Fix blocking file read in async method `Deployment.load_from_yaml` — https://github.com/PrefectHQ/prefect/pull/8798 -- Allow tasks and flows to make redundant transitions such as `RUNNING` -> `RUNNING` — https://github.com/PrefectHQ/prefect/pull/8802 + +- Fix blocking file read in async method `Deployment.load_from_yaml` — +- Allow tasks and flows to make redundant transitions such as `RUNNING` -> `RUNNING` — ### Experimental -- Enable setting environment variables for worker submitted flow runs — https://github.com/PrefectHQ/prefect/pull/8706 -- Add `--work-queue` option to worker CLI — https://github.com/PrefectHQ/prefect/pull/8771 -- Add artifact description column — https://github.com/PrefectHQ/prefect/pull/8805 -- Format types in result descriptions as code — https://github.com/PrefectHQ/prefect/pull/8808 -- Add artifacts for unpersisted results — https://github.com/PrefectHQ/prefect/pull/8759 -- Update default result descriptions — https://github.com/PrefectHQ/prefect/pull/8772 + +- Enable setting environment variables for worker submitted flow runs — +- Add `--work-queue` option to worker CLI — +- Add artifact description column — +- Format types in result descriptions as code — +- Add artifacts for unpersisted results — +- Update default result descriptions — ### Documentation -- Update workspace roles table to emphasize differences between roles — https://github.com/PrefectHQ/prefect/pull/8787 -- Add webhook block docs — https://github.com/PrefectHQ/prefect/pull/8773 -- Update info on Ray's support for hardware and software — https://github.com/PrefectHQ/prefect/pull/8811 + +- Update workspace roles table to emphasize differences between roles — +- Add webhook block docs — +- Update info on Ray's support for hardware and software — ### Helm chart -- Helm charts are now automatically published on each Prefect release — https://github.com/PrefectHQ/prefect/pull/8776 + +- Helm charts are now automatically published on each Prefect release — ### Contributors + - @devanshdoshi9 -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.8.5...2.8.6 +**All changes**: ## Release 2.8.5 ### Enhancements -- Add an endpoint to retrieve data from the collection registry — https://github.com/PrefectHQ/prefect/pull/8685 -- Remove deployment flow run foreign key to speed up deployment deletion — https://github.com/PrefectHQ/prefect/pull/8684 + +- Add an endpoint to retrieve data from the collection registry — +- Remove deployment flow run foreign key to speed up deployment deletion — ### Fixes -- Fix `prefect cloud login` detection of "ENTER" on some machines — https://github.com/PrefectHQ/prefect/pull/8705 -- Fix Kubernetes job watch timeout request error by rounding floats — https://github.com/PrefectHQ/prefect/pull/8733 -- Fix flow load errors by excluding fsspec `2023.3.0` during requirements installation — https://github.com/PrefectHQ/prefect/pull/8757 -- Fix Deployment and Concurrency Limit pages tabs — https://github.com/PrefectHQ/prefect/pull/8716 -- Add tests for base exceptions and calls — https://github.com/PrefectHQ/prefect/pull/8734 + +- Fix `prefect cloud login` detection of "ENTER" on some machines — +- Fix Kubernetes job watch timeout request 
error by rounding floats — +- Fix flow load errors by excluding fsspec `2023.3.0` during requirements installation — +- Fix Deployment and Concurrency Limit pages tabs — +- Add tests for base exceptions and calls — ### Experimental -- Refactor supervisor API to allow configuration — https://github.com/PrefectHQ/prefect/pull/8695 -- Consolidate `WorkItem` and `Call` classes — https://github.com/PrefectHQ/prefect/pull/8697 -- Use `PREFECT_API_URL` when initializing the events client — https://github.com/PrefectHQ/prefect/pull/8704 -- Refactor supervisors to interact directly with "Worker" threads — https://github.com/PrefectHQ/prefect/pull/8714 -- Add chaining to cancel contexts — https://github.com/PrefectHQ/prefect/pull/8719 -- Add portal abstract base for worker threads and supervisors — https://github.com/PrefectHQ/prefect/pull/8717 -- Fix bugs in supervisors implementation — https://github.com/PrefectHQ/prefect/pull/8718 -- Refactor concurrency module and add documentation — https://github.com/PrefectHQ/prefect/pull/8724 -- Update block event resource IDs to use block-document id instead of name. — https://github.com/PrefectHQ/prefect/pull/8730 -- Add cancellation reporting to calls and waiters — https://github.com/PrefectHQ/prefect/pull/8731 -- Add worker command output when applying deployments with a work pool — https://github.com/PrefectHQ/prefect/pull/8725 -- Add support for float timeouts using alarms — https://github.com/PrefectHQ/prefect/pull/8737 -- Add the ability to discover type from work pool when starting a worker — https://github.com/PrefectHQ/prefect/pull/8711 -- Add basic event instrumentation to blocks — https://github.com/PrefectHQ/prefect/pull/8686 + +- Refactor supervisor API to allow configuration — +- Consolidate `WorkItem` and `Call` classes — +- Use `PREFECT_API_URL` when initializing the events client — +- Refactor supervisors to interact directly with "Worker" threads — +- Add chaining to cancel contexts — +- Add portal abstract base for worker threads and supervisors — +- Fix bugs in supervisors implementation — +- Refactor concurrency module and add documentation — +- Update block event resource IDs to use block-document id instead of name — +- Add cancellation reporting to calls and waiters — +- Add worker command output when applying deployments with a work pool — +- Add support for float timeouts using alarms — +- Add the ability to discover type from work pool when starting a worker — +- Add basic event instrumentation to blocks — ### Documentation -- Corrected typo in Storage.md — https://github.com/PrefectHQ/prefect/pull/8692 -- Fix `prefect flow-run cancel` help — https://github.com/PrefectHQ/prefect/pull/8755 + +- Corrected typo in Storage.md — +- Fix `prefect flow-run cancel` help — ### Contributors -* @Zesky665 made their first contribution in https://github.com/PrefectHQ/prefect/pull/8692 -* @predatorprasad made their first contribution in https://github.com/PrefectHQ/prefect/pull/8755 +- @Zesky665 made their first contribution in + +- @predatorprasad made their first contribution in + +**All changes**: ## Release 2.8.4 ### Enhancements -- Enable `DefaultAzureCredential` authentication for Azure filesystem block — https://github.com/PrefectHQ/prefect/pull/7513 -- Add support for yaml config strings to `KubernetesClusterConfig` — https://github.com/PrefectHQ/prefect/pull/8643 -- Add `--description` flag to `prefect deployment build` CLI command — https://github.com/PrefectHQ/prefect/pull/8603 -- Handle SIGTERM received by server gracefully — 
https://github.com/PrefectHQ/prefect/pull/7948 -- Optimize database query performance by changing SQLAlchemy lazy loads from `joined` to `selectin` — https://github.com/PrefectHQ/prefect/pull/8659 -- Add clarifying modal to the task run page in the UI — https://github.com/PrefectHQ/prefect/pull/8295 + +- Enable `DefaultAzureCredential` authentication for Azure filesystem block — +- Add support for yaml config strings to `KubernetesClusterConfig` — +- Add `--description` flag to `prefect deployment build` CLI command — +- Handle SIGTERM received by server gracefully — +- Optimize database query performance by changing SQLAlchemy lazy loads from `joined` to `selectin` — +- Add clarifying modal to the task run page in the UI — ### Fixes -- Ensure flow parameters default values are present during deployment runs — https://github.com/PrefectHQ/prefect/pull/8666 -- Use a monotonic clock for Kubernetes job watch timeout deadline calculation — https://github.com/PrefectHQ/prefect/pull/8680 -- Fix version misaligned on the settings page in the UI — https://github.com/PrefectHQ/prefect/pull/8676 + +- Ensure flow parameters default values are present during deployment runs — +- Use a monotonic clock for Kubernetes job watch timeout deadline calculation — +- Fix version misaligned on the settings page in the UI — ### Experimental -- Refactor supervisors to manage submission — https://github.com/PrefectHQ/prefect/pull/8631 -- Improve supervisor repr for debugging — https://github.com/PrefectHQ/prefect/pull/8633 -- Add timeout support to supervisors — https://github.com/PrefectHQ/prefect/pull/8649 -- Track flow run id when generating task run results — https://github.com/PrefectHQ/prefect/pull/8674 -- Create `EventsWorker` to manage client lifecycle and abstract async nature — https://github.com/PrefectHQ/prefect/pull/8673 + +- Refactor supervisors to manage submission — +- Improve supervisor repr for debugging — +- Add timeout support to supervisors — +- Track flow run id when generating task run results — +- Create `EventsWorker` to manage client lifecycle and abstract async nature — ### Documentation -- Add tutorial for running an agent on Azure Container Instances — https://github.com/PrefectHQ/prefect/pull/8620 -- Add security headers for docs — https://github.com/PrefectHQ/prefect/pull/8655 -- Add markdown link fix in orchestration docs — https://github.com/PrefectHQ/prefect/pull/8660 + +- Add tutorial for running an agent on Azure Container Instances — +- Add security headers for docs — +- Add markdown link fix in orchestration docs — ## New Contributors -* @samdyzon made their first contribution in https://github.com/PrefectHQ/prefect/pull/7513 -* @mjschock made their first contribution in https://github.com/PrefectHQ/prefect/pull/8660 -* @jcorrado76 made their first contribution in https://github.com/PrefectHQ/prefect/pull/8603 -* @scharlottej13 made their first contribution in https://github.com/PrefectHQ/prefect/pull/8669 -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.8.3...2.8.4 +- @samdyzon made their first contribution in + +- @mjschock made their first contribution in +- @jcorrado76 made their first contribution in +- @scharlottej13 made their first contribution in + +**All changes**: ## Release 2.8.3 ### `on_completion` and `on_failure` hooks for flows and tasks -With this release you can now add client-side hooks that will be called when your flow or task enters a `Completed` or `Failed` state. 
This is great for any case where you want to execute code without involvement of the Prefect API.
+
+With this release you can now add client-side hooks that will be called when your flow or task enters a `Completed` or `Failed` state. This is great for any case where you want to execute code without involvement of the Prefect API.

Both flows and tasks include `on_completion` and `on_failure` options where a list of callable hooks can be provided. The callable will receive three arguments:
+
- `flow`, `flow_run`, and `state` in the case of a flow hook
- `task`, `task_run`, and `state` in the case of a task hook

@@ -598,14 +5424,14 @@ For example, here we add completion hooks to a flow and a task:
from prefect import task, flow

def my_completion_task_hook_1(task, task_run, state):
-    print("This is the first hook - Task completed!!!")
-
+    print("This is the first hook - Task completed!!!")
+
def my_completion_task_hook_2(task, task_run, state):
-    print("This is the second hook - Task completed!!!")
-
+    print("This is the second hook - Task completed!!!")
+
def my_completion_flow_hook(flow, flow_run, state):
    print("Flow completed!!!")
-
+
@task(on_completion=[my_completion_task_hook_1, my_completion_task_hook_2])
def my_task():
    print("This is the task!")
@@ -642,53 +5468,58 @@ if __name__ == "__main__":
```

### Enhancements
-- Update `quote` handling in input resolution to skip descending into the quoted expression — https://github.com/PrefectHQ/prefect/pull/8576
-- Add light and dark mode color and contrast enhancements to UI — https://github.com/PrefectHQ/prefect/pull/8629
+
+- Update `quote` handling in input resolution to skip descending into the quoted expression —
+- Add light and dark mode color and contrast enhancements to UI —

### Fixes
-- Fix `Task.map` type hint for type-checker compatibility with async tasks — https://github.com/PrefectHQ/prefect/pull/8607
-- Update Docker container name sanitization to handle "ce" and "ee" when checking Docker version — https://github.com/PrefectHQ/prefect/pull/8588
-- Fix Kubernetes Job watch timeout behavior when streaming logs — https://github.com/PrefectHQ/prefect/pull/8618
-- Fix date range filter selection on the flow runs UI page — https://github.com/PrefectHQ/prefect/pull/8616
-- Fix Kubernetes not streaming logs when using multiple containers in Job — https://github.com/PrefectHQ/prefect/pull/8430
+
+- Fix `Task.map` type hint for type-checker compatibility with async tasks —
+- Update Docker container name sanitization to handle "ce" and "ee" when checking Docker version —
+- Fix Kubernetes Job watch timeout behavior when streaming logs —
+- Fix date range filter selection on the flow runs UI page —
+- Fix Kubernetes not streaming logs when using multiple containers in Job —

### Experimental
-- Update worker variable typing for clearer display in the UI — https://github.com/PrefectHQ/prefect/pull/8613
-- Update `BaseWorker` to ignore flow runs with associated storage block — https://github.com/PrefectHQ/prefect/pull/8619
-- Add experimental API for artifacts — https://github.com/PrefectHQ/prefect/pull/8404
+
+- Update worker variable typing for clearer display in the UI —
+- Update `BaseWorker` to ignore flow runs with associated storage block —
+- Add experimental API for artifacts —

### Documentation
-- Add documentation for resuming a flow run via the UI — https://github.com/PrefectHQ/prefect/pull/8621
-- Add [`prefect-sifflet`](https://siffletapp.github.io/prefect-sifflet/) to Collections catalog —
https://github.com/PrefectHQ/prefect/pull/8599 +- Add documentation for resuming a flow run via the UI — +- Add [`prefect-sifflet`](https://siffletapp.github.io/prefect-sifflet/) to Collections catalog — ### Contributors -- @jefflaporte made their first contribution in https://github.com/PrefectHQ/prefect/pull/8430 -- @AzemaBaptiste made their first contribution in https://github.com/PrefectHQ/prefect/pull/8599 + +- @jefflaporte made their first contribution in +- @AzemaBaptiste made their first contribution in - @darrida -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.8.2...2.8.3 +**All changes**: ## Release 2.8.2 ### Fixes -- Reenable plugin loading in `prefect` module init — https://github.com/PrefectHQ/prefect/pull/8569 + +- Re-enable plugin loading in `prefect` module init — ### Documentation -- Fix logging format override example — https://github.com/PrefectHQ/prefect/pull/8565 +- Fix logging format override example — ### Experimental -- Add events client to `PrefectClient` — https://github.com/PrefectHQ/prefect/pull/8546 +- Add events client to `PrefectClient` — -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.8.1...2.8.2 +**All changes**: ## Release 2.8.1 ### New names, same behavior -We knew we were onto something big when we [first announced Prefect Orion](https://www.prefect.io/guide/blog/announcing-prefect-orion/), our second-generation orchestration engine, but we didn't know just how big. Orion's foundational design principles of dynamism, developer experience, and observability have shaped the Prefect 2 codebase to such an extent that it's difficult to tell where Orion ends and other components begin. For example, it's been challenging to communicate clearly about the “Orion API” (the orchestration API), an “Orion Server” (a hosted instance of the API and UI), and individual components of that server. +We knew we were onto something big when we [first announced Prefect Orion](https://www.prefect.io/guide/blog/announcing-prefect-orion/), our second-generation orchestration engine, but we didn't know just how big. Orion's foundational design principles of dynamism, developer experience, and observability have shaped the Prefect 2 codebase to such an extent that it's difficult to tell where Orion ends and other components begin. For example, it's been challenging to communicate clearly about the “Orion API” (the orchestration API), an “Orion Server” (a hosted instance of the API and UI), and individual components of that server. With this release, **we've removed references to "Orion" and replaced them with more explicit, conventional nomenclature throughout the codebase**. All changes are **fully backwards compatible** and will follow our standard deprecation cycle of six months. These changes clarify the function of various components, commands, variables, and more. @@ -696,83 +5527,89 @@ See the [deprecated section](https://github.com/PrefectHQ/prefect/blob/main/RELE Note: Many settings have been renamed but your old settings will be respected. To automatically convert all of the settings in your current profile to the new names, run the `prefect config validate` command. 
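+
+As a small illustration, here is a hedged sketch of reading the renamed settings from Python (assuming Prefect 2.8.1; these are the new names for `PREFECT_ORION_API_HOST` and `PREFECT_ORION_API_PORT` listed below):
+
+```python
+from prefect.settings import PREFECT_SERVER_API_HOST, PREFECT_SERVER_API_PORT
+
+# Setting objects resolve against the active profile and environment
+print(PREFECT_SERVER_API_HOST.value())
+print(PREFECT_SERVER_API_PORT.value())
+```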
- ### Enhancements -- Add `MattermostWebhook` notification block — https://github.com/PrefectHQ/prefect/pull/8341 -- Add ability to pass in RRule string to `--rrule` option in `prefect set-schedule` command - https://github.com/PrefectHQ/prefect/pull/8543 + +- Add `MattermostWebhook` notification block — +- Add ability to pass in RRule string to `--rrule` option in `prefect set-schedule` command — ### Fixes -- Fix default deployment parameters not populating in the UI — https://github.com/PrefectHQ/prefect/pull/8518 -- Fix ability to use anchor date when setting an interval schedule with the `prefect set-schedule` command — https://github.com/PrefectHQ/prefect/pull/8524 + +- Fix default deployment parameters not populating in the UI — +- Fix ability to use anchor date when setting an interval schedule with the `prefect set-schedule` command — ### Documentation -- Add table listing available blocks — https://github.com/PrefectHQ/prefect/pull/8443 -- Fix work pools documentation links — https://github.com/PrefectHQ/prefect/pull/8477 -- Add examples for custom automation triggers — https://github.com/PrefectHQ/prefect/pull/8476 -- Add webhooks to Automations docs — https://github.com/PrefectHQ/prefect/pull/8514 -- Document Prefect Cloud API rate limits — https://github.com/PrefectHQ/prefect/pull/8529 -### Experimental -- Add metadata fields to `BaseWorker` — https://github.com/PrefectHQ/prefect/pull/8527 -- Add default artifact metadata to `LiteralResults` and `PersistedResults` — https://github.com/PrefectHQ/prefect/pull/8501 +- Add table listing available blocks — +- Fix work pools documentation links — +- Add examples for custom automation triggers — +- Add webhooks to Automations docs — +- Document Prefect Cloud API rate limits — -### Deprecated -- Default SQLite database name changed from `orion.db` to `prefect.db` -- Logger `prefect.orion` renamed to `prefect.server` -- Constant `ORION_API_VERSION` renamed to `SERVER_API_VERSION` -- Kubernetes deployment template application name changed from `prefect-orion` to `prefect-server` -- Command `prefect kubernetes manifest orion` renamed to `prefect kubernetes manifest server` -- Log config handler `orion` renamed to `api` -- Class `OrionLogWorker` renamed to `APILogWorker` -- Class `OrionHandler` renamed to `APILogHandler` -- Directory `orion-ui` renamed to `ui` -- Class `OrionRouter` renamed to `PrefectRouter` -- Class `OrionAPIRoute` renamed to `PrefectAPIRoute` -- Class `OrionDBInterface` renamed to `PrefectDBInterface` -- Class `OrionClient` renamed to `PrefectClient` -- Module `prefect.client.orion` renamed to `prefect.client.orchestration` -- Command group `prefect orion` renamed to `prefect server` -- Module `prefect.orion` renamed to `prefect.server` -- The following settings have been renamed: - - `PREFECT_LOGGING_ORION_ENABLED` → `PREFECT_LOGGING_TO_API_ENABLED` - - `PREFECT_LOGGING_ORION_BATCH_INTERVAL` → `PREFECT_LOGGING_TO_API_BATCH_INTERVAL` - - `PREFECT_LOGGING_ORION_BATCH_SIZE` → `PREFECT_LOGGING_TO_API_BATCH_SIZE` - - `PREFECT_LOGGING_ORION_MAX_LOG_SIZE` → `PREFECT_LOGGING_TO_API_MAX_LOG_SIZE` - - `PREFECT_LOGGING_ORION_WHEN_MISSING_FLOW` → `PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW` - - `PREFECT_ORION_BLOCKS_REGISTER_ON_START` → `PREFECT_API_BLOCKS_REGISTER_ON_START` - - `PREFECT_ORION_DATABASE_CONNECTION_URL` → `PREFECT_API_DATABASE_CONNECTION_URL` - - `PREFECT_ORION_DATABASE_MIGRATE_ON_START` → `PREFECT_API_DATABASE_MIGRATE_ON_START` - - `PREFECT_ORION_DATABASE_TIMEOUT` → `PREFECT_API_DATABASE_TIMEOUT` - - 
`PREFECT_ORION_DATABASE_CONNECTION_TIMEOUT` → `PREFECT_API_DATABASE_CONNECTION_TIMEOUT`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_LOOP_SECONDS` → `PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_INSERT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE`
-  - `PREFECT_ORION_SERVICES_LATE_RUNS_LOOP_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS`
-  - `PREFECT_ORION_SERVICES_LATE_RUNS_AFTER_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS`
-  - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS`
-  - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS` → `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS`
-  - `PREFECT_ORION_API_DEFAULT_LIMIT` → `PREFECT_API_DEFAULT_LIMIT`
-  - `PREFECT_ORION_API_HOST` → `PREFECT_SERVER_API_HOST`
-  - `PREFECT_ORION_API_PORT` → `PREFECT_SERVER_API_PORT`
-  - `PREFECT_ORION_API_KEEPALIVE_TIMEOUT` → `PREFECT_SERVER_API_KEEPALIVE_TIMEOUT`
-  - `PREFECT_ORION_UI_ENABLED` → `PREFECT_UI_ENABLED`
-  - `PREFECT_ORION_UI_API_URL` → `PREFECT_UI_API_URL`
-  - `PREFECT_ORION_ANALYTICS_ENABLED` → `PREFECT_SERVER_ANALYTICS_ENABLED`
-  - `PREFECT_ORION_SERVICES_SCHEDULER_ENABLED` → `PREFECT_API_SERVICES_SCHEDULER_ENABLED`
-  - `PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED` → `PREFECT_API_SERVICES_LATE_RUNS_ENABLED`
-  - `PREFECT_ORION_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED` → `PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED`
-  - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_ENABLED` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_ENABLED`
-  - `PREFECT_ORION_TASK_CACHE_KEY_MAX_LENGTH` → `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH`
-  - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_ENABLED` → `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_ENABLED`
+### Experimental
+- Add metadata fields to `BaseWorker` —
+- Add default artifact metadata to `LiteralResults` and `PersistedResults` —
+
+### Deprecated
+
+- Default SQLite database name changed from `orion.db` to `prefect.db`
+- Logger `prefect.orion` renamed to `prefect.server`
+- Constant `ORION_API_VERSION` renamed to `SERVER_API_VERSION`
+- Kubernetes deployment template application name changed from `prefect-orion` to `prefect-server`
+- Command `prefect kubernetes manifest orion` renamed to `prefect kubernetes manifest server`
+- Log config handler `orion` renamed to `api`
+- Class `OrionLogWorker` renamed to `APILogWorker`
+- Class `OrionHandler` renamed to `APILogHandler`
+- Directory `orion-ui` renamed to `ui`
+- Class `OrionRouter` renamed to `PrefectRouter`
+- Class `OrionAPIRoute` renamed to `PrefectAPIRoute`
+- Class `OrionDBInterface` renamed to `PrefectDBInterface`
+- Class `OrionClient` renamed to `PrefectClient`
+- Module `prefect.client.orion` renamed to `prefect.client.orchestration`
+- Command group `prefect orion` renamed to `prefect server`
+- Module `prefect.orion` renamed to `prefect.server`
+- The following settings have been renamed:
+  - `PREFECT_LOGGING_ORION_ENABLED` → `PREFECT_LOGGING_TO_API_ENABLED`
+  - `PREFECT_LOGGING_ORION_BATCH_INTERVAL` → `PREFECT_LOGGING_TO_API_BATCH_INTERVAL`
+  - `PREFECT_LOGGING_ORION_BATCH_SIZE` → `PREFECT_LOGGING_TO_API_BATCH_SIZE`
+  - `PREFECT_LOGGING_ORION_MAX_LOG_SIZE` → `PREFECT_LOGGING_TO_API_MAX_LOG_SIZE`
+  - `PREFECT_LOGGING_ORION_WHEN_MISSING_FLOW` → `PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW`
+  - `PREFECT_ORION_BLOCKS_REGISTER_ON_START` → `PREFECT_API_BLOCKS_REGISTER_ON_START`
+  - `PREFECT_ORION_DATABASE_CONNECTION_URL` → `PREFECT_API_DATABASE_CONNECTION_URL`
+  - `PREFECT_ORION_DATABASE_MIGRATE_ON_START` → `PREFECT_API_DATABASE_MIGRATE_ON_START`
+  - `PREFECT_ORION_DATABASE_TIMEOUT` → `PREFECT_API_DATABASE_TIMEOUT`
+  - `PREFECT_ORION_DATABASE_CONNECTION_TIMEOUT` → `PREFECT_API_DATABASE_CONNECTION_TIMEOUT`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_LOOP_SECONDS` → `PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_RUNS` → `PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME` → `PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_INSERT_BATCH_SIZE` → `PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE`
+  - `PREFECT_ORION_SERVICES_LATE_RUNS_LOOP_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS`
+  - `PREFECT_ORION_SERVICES_LATE_RUNS_AFTER_SECONDS` → `PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS`
+  - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_LOOP_SECONDS`
+  - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS` → `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS`
+  - `PREFECT_ORION_API_DEFAULT_LIMIT` → `PREFECT_API_DEFAULT_LIMIT`
+  - `PREFECT_ORION_API_HOST` → `PREFECT_SERVER_API_HOST`
+  - `PREFECT_ORION_API_PORT` → `PREFECT_SERVER_API_PORT`
+  - `PREFECT_ORION_API_KEEPALIVE_TIMEOUT` → `PREFECT_SERVER_API_KEEPALIVE_TIMEOUT`
+  - `PREFECT_ORION_UI_ENABLED` → `PREFECT_UI_ENABLED`
+  - `PREFECT_ORION_UI_API_URL` → `PREFECT_UI_API_URL`
+  - `PREFECT_ORION_ANALYTICS_ENABLED` → `PREFECT_SERVER_ANALYTICS_ENABLED`
+  - `PREFECT_ORION_SERVICES_SCHEDULER_ENABLED` → `PREFECT_API_SERVICES_SCHEDULER_ENABLED`
+  - `PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED` → `PREFECT_API_SERVICES_LATE_RUNS_ENABLED`
+  - `PREFECT_ORION_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED` → `PREFECT_API_SERVICES_FLOW_RUN_NOTIFICATIONS_ENABLED`
+  - `PREFECT_ORION_SERVICES_PAUSE_EXPIRATIONS_ENABLED` → `PREFECT_API_SERVICES_PAUSE_EXPIRATIONS_ENABLED`
+  - `PREFECT_ORION_TASK_CACHE_KEY_MAX_LENGTH` → `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH`
+  - `PREFECT_ORION_SERVICES_CANCELLATION_CLEANUP_ENABLED` → `PREFECT_API_SERVICES_CANCELLATION_CLEANUP_ENABLED`

### Contributors
-
-- @qheuristics made their first contribution in https://github.com/PrefectHQ/prefect/pull/8478
-- @KernelErr made their first contribution in https://github.com/PrefectHQ/prefect/pull/8485
+
+- @qheuristics made their first contribution in
+- @KernelErr made their first contribution in
+
+**All changes**:

## Release 2.8.0
@@ -803,7 +5640,7 @@
Deployments can now be assigned to a work queue in a specific work pool.
Use the `--pool` and `--queue` options when building a deployment:
```bash
prefect deployment build \
  --pool my-pool \
-  --queue high-priority \
+  --queue high-priority \
  --name high-priority \
  high_priority_flow.py:high_priority_flow
```
@@ -823,29 +5660,35 @@
prefect agent start --pool my-pool --queue high-priority

To learn more about work pools, check out the [docs](https://docs.prefect.io/concepts/work-pools/) or see the relevant pull requests:

### Enhancements
-- Add ability to filter on work pool and queue when querying flow runs — https://github.com/PrefectHQ/prefect/pull/8459
-- Ensure agent respects work queue priority — https://github.com/PrefectHQ/prefect/pull/8458
-- Add ability to create a flow run from the UI with parameters from a previous run — https://github.com/PrefectHQ/prefect/pull/8405
-- Add generic `Webhook` block — https://github.com/PrefectHQ/prefect/pull/8401
-- Add override customizations functionality to deployments via CLI — https://github.com/PrefectHQ/prefect/pull/8349
-- Add ability to reset concurrency limits in CLI to purge existing runs from taking concurrency slots — https://github.com/PrefectHQ/prefect/pull/8408
-- Ensure matching flow run state information in UI — https://github.com/PrefectHQ/prefect/pull/8441
-- Customize CLI block registration experience based on `PREFECT_UI_URL` — https://github.com/PrefectHQ/prefect/pull/8438
+
+- Add ability to filter on work pool and queue when querying flow runs —
+- Ensure agent respects work queue priority —
+- Add ability to create a flow run from the UI with parameters from a previous run —
+- Add generic `Webhook` block —
+- Add override customizations functionality to deployments via CLI —
+- Add ability to reset concurrency limits in CLI to purge existing runs from taking concurrency slots —
+- Ensure matching flow run state information in UI —
+- Customize CLI block registration experience based on `PREFECT_UI_URL` —

### Fixes
-- Fix `prefect dev start` command — https://github.com/PrefectHQ/prefect/pull/8176
-- Fix display of long log messages when in the UI — https://github.com/PrefectHQ/prefect/pull/8449
-- Update `get_run_logger` to accomodate returning `logging.LoggerAdapter` — https://github.com/PrefectHQ/prefect/pull/8422
-- Restore Prefect wrapper around HTTP errors for nicer error messages — https://github.com/PrefectHQ/prefect/pull/8391
-- Fix display of work pool flow run filter in the UI — https://github.com/PrefectHQ/prefect/pull/8453
+
+- Fix `prefect dev start` command —
+- Fix display of long log messages in the UI —
+- Update `get_run_logger` to accommodate returning `logging.LoggerAdapter` —
+- Restore Prefect wrapper around HTTP errors for nicer error messages —
+- Fix display of work pool flow run filter in the UI —

### Documentation
-- Update Infrastructure concept documentation with `extra-pip-package` example and updated `deployment.yaml` — https://github.com/PrefectHQ/prefect/pull/8465
-- Add work pools documentation - https://github.com/PrefectHQ/prefect/pull/8377
+
+- Update Infrastructure concept documentation with `extra-pip-package` example and updated `deployment.yaml` —
+- Add work pools documentation —

### Contributors
+
- @carderne

+**All changes**:
+
## Release 2.7.12

### Custom flow and task run names 🎉
@@ -884,25 +5727,27 @@ def my_flow(name: str, date: datetime):
my_flow()
```

-See [the docs](https://docs.prefect.io/tutorials/flow-task-config/#basic-flow-configuration) or https://github.com/PrefectHQ/prefect/pull/8378 for more details.
+See [the docs](https://docs.prefect.io/tutorials/tasks/#basic-flow-configuration) or for more details.
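+
+The example above is abbreviated in these notes; as a hedged sketch, the feature is driven by the new `flow_run_name` and `task_run_name` options, which accept a format string templated with the function's parameters (the template below is illustrative, not the original):
+
+```python
+from datetime import datetime
+from prefect import flow
+
+# The run name template is filled in from the flow's parameters
+@flow(flow_run_name="{name}-on-{date:%A}")
+def my_flow(name: str, date: datetime):
+    pass
+
+# Produces a flow run named like "marvin-on-Thursday"
+my_flow(name="marvin", date=datetime.utcnow())
+```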
### Enhancements
-- Update the deployment page to show the runs tab before the description — https://github.com/PrefectHQ/prefect/pull/8398
+
+- Update the deployment page to show the runs tab before the description —

### Fixes
-- Fix artifact migration to only include states that have non-null data — https://github.com/PrefectHQ/prefect/pull/8420
-- Fix error when using `prefect work-queue ls` without enabling work pools — https://github.com/PrefectHQ/prefect/pull/8427
+
+- Fix artifact migration to only include states that have non-null data —
+- Fix error when using `prefect work-queue ls` without enabling work pools —

### Experimental
-- Add error when attempting to apply a deployment to a work pool that hasn't been created yet — https://github.com/PrefectHQ/prefect/pull/8413
-- Create queues in the correct work pool when applying a deployment for a queue that hasn't been created yet — https://github.com/PrefectHQ/prefect/pull/8413
-### Contributors
-- @NodeJSmith
+
+- Add error when attempting to apply a deployment to a work pool that hasn't been created yet —
+- Create queues in the correct work pool when applying a deployment for a queue that hasn't been created yet —

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.11...2.7.12
+### Contributors
+
+- @NodeJSmith
+
+**All changes**:

## Release 2.7.11

@@ -931,10 +5776,10 @@
We want to see messages from `my-logger` in the UI. We can do this with `PREFECT_LOGGING_EXTRA_LOGGERS`:

```
$ PREFECT_LOGGING_EXTRA_LOGGERS="my-logger" python example.py
example.py:6: UserWarning: Logger 'my-logger' attempted to send logs to Orion without a flow run id. The Orion log handler can only send logs within flow run contexts unless the flow run id is manually provided.
  my_logger.info("outside the flow")
-18:09:30.518 | INFO | my-logger - outside the flow
-18:09:31.028 | INFO | prefect.engine - Created flow run 'elated-curassow' for flow 'foo'
-18:09:31.104 | INFO | my-logger - inside the flow
-18:09:31.179 | INFO | Flow run 'elated-curassow' - Finished in state Completed()
+18:09:30.518 | INFO | my-logger - outside the flow
+18:09:31.028 | INFO | prefect.engine - Created flow run 'elated-curassow' for flow 'foo'
+18:09:31.104 | INFO | my-logger - inside the flow
+18:09:31.179 | INFO | Flow run 'elated-curassow' - Finished in state Completed()
```

Notice that we got a warning. This helps avoid confusion when certain logs don't appear in the UI; if you understand the behavior, you can turn the warning off:

```
$ prefect config set PREFECT_LOGGING_ORION_WHEN_MISSING_FLOW=ignore
Updated profile 'default'.
```
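+
+For reference, here is a plausible reconstruction of the `example.py` used in the demonstration above (the script itself is not shown in these notes, so treat this as a hedged sketch):
+
+```python
+import logging
+
+from prefect import flow
+
+my_logger = logging.getLogger("my-logger")
+
+my_logger.info("outside the flow")  # the call flagged by the UserWarning
+
+@flow
+def foo():
+    my_logger.info("inside the flow")
+
+if __name__ == "__main__":
+    foo()
+```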
### Enhancements
-- Update default task run name to exclude hash of task key — https://github.com/PrefectHQ/prefect/pull/8292
-- Update Docker images to update preinstalled packages on build — https://github.com/PrefectHQ/prefect/pull/8288
-- Add PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW to allow loggers to be used outside of flows — https://github.com/PrefectHQ/prefect/pull/8311
-- Display Runs before Deployments on flow pages - https://github.com/PrefectHQ/prefect/pull/8386
-- Clearify output CLI message when switching profiles - https://github.com/PrefectHQ/prefect/pull/8383
+
+- Update default task run name to exclude hash of task key —
+- Update Docker images to update preinstalled packages on build —
+- Add `PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW` to allow loggers to be used outside of flows —
+- Display Runs before Deployments on flow pages —
+- Clarify output CLI message when switching profiles —

### Fixes
-- Fix bug preventing agents from properly updating Cancelling runs to a Cancelled state — https://github.com/PrefectHQ/prefect/pull/8315
-- Fix bug where Kubernetes job monitoring exited early when no timeout was given — https://github.com/PrefectHQ/prefect/pull/8350
+
+- Fix bug preventing agents from properly updating Cancelling runs to a Cancelled state —
+- Fix bug where Kubernetes job monitoring exited early when no timeout was given —

### Experimental
+
- We're working on work pools, groups of work queues. Together, work pools & queues give you greater flexibility and control in organizing and prioritizing work.
-  - Add updates to work queue `last_polled` time when polling work pools — https://github.com/PrefectHQ/prefect/pull/8338
-  - Add CLI support for work pools — https://github.com/PrefectHQ/prefect/pull/8259
-  - Add fields to `work_queue` table to accommodate work pools — https://github.com/PrefectHQ/prefect/pull/8264
-  - Add work queue data migration — https://github.com/PrefectHQ/prefect/pull/8327
-  - Fix default value for priority on `WorkQueue` core schema — https://github.com/PrefectHQ/prefect/pull/8373
-- Add ability to exclude experimental fields in API calls — https://github.com/PrefectHQ/prefect/pull/8274, https://github.com/PrefectHQ/prefect/pull/8331
-- Add Prefect Cloud Events schema and clients — https://github.com/PrefectHQ/prefect/pull/8357
-
-### Documentation
-- Add git commands to Prefect Recipes contribution page — https://github.com/PrefectHQ/prefect/pull/8283
-- Add `retry_delay_seconds` and `exponential_backoff` examples to Tasks retries documentation — https://github.com/PrefectHQ/prefect/pull/8280
-- Add role permissions regarding block secrets — https://github.com/PrefectHQ/prefect/pull/8309
-- Add getting started tutorial video to Prefect Cloud Quickstart — https://github.com/PrefectHQ/prefect/pull/8336
-- Add tips for re-registering blocks from Prefect Collections — https://github.com/PrefectHQ/prefect/pull/8333
-- Improve examples for Kubernetes infrastructure overrides — https://github.com/PrefectHQ/prefect/pull/8312
-- Add mention of reverse proxy for `PREFECT_API_URL` config — https://github.com/PrefectHQ/prefect/pull/8240
-- Fix unused Cloud Getting Started page — https://github.com/PrefectHQ/prefect/pull/8291
-- Fix Prefect Cloud typo in FAQ — https://github.com/PrefectHQ/prefect/pull/8317
+  - Add updates to work queue `last_polled` time when polling work pools —
+  - Add CLI support for work pools —
+  - Add fields to `work_queue` table to accommodate work pools —
+  - Add work queue data migration —
+  - Fix default value for priority on `WorkQueue` core schema —
+- Add ability to exclude experimental fields in API calls —
+- Add Prefect Cloud Events schema and clients —
+
+### Documentation
+
+- Add git commands to Prefect Recipes contribution page —
+- Add `retry_delay_seconds` and `exponential_backoff` examples to Tasks retries documentation —
+- Add role permissions regarding block secrets —
+- Add getting started tutorial video to Prefect Cloud Quickstart —
+- Add tips for re-registering blocks from Prefect Collections —
+- Improve examples for Kubernetes infrastructure overrides —
+- Add mention of reverse proxy for `PREFECT_API_URL` config —
+- Fix unused Cloud Getting Started page —
+- Fix Prefect Cloud typo in FAQ —

### Collections
-- Add `ShellOperation` implementing `JobBlock` in `v0.1.4` release of `prefect-shell` - https://github.com/PrefectHQ/prefect-shell/pull/55
-- Add `CensusSync` implementing `JobBlock` in `v0.1.1` release of `prefect-census` - https://github.com/PrefectHQ/prefect-census/pull/15
+
+- Add `ShellOperation` implementing `JobBlock` in `v0.1.4` release of `prefect-shell` —
+- Add `CensusSync` implementing `JobBlock` in `v0.1.1` release of `prefect-census` —

### Contributors
+
- @chiaberry
- @hozn
- @manic-miner
- @space-age-pete

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.10...2.7.11
+**All changes**:

## Release 2.7.10

@@ -999,10 +5850,9 @@ We added SIGTERM handling to the flow run engine. When cancellation is requested,
We improved our handling of runs that are in the process of cancelling. When a run is cancelled, it's first placed in a "cancelling" state then moved to a "cancelled" state when cancellation is complete. Previously, concurrency slots were released as soon as cancellation was requested. Now, the flow run will continue to occupy concurrency slots until a "cancelled" state is reached.

-We added cleanup of tasks and subflows belonging to cancelled flow runs. Previously, these tasks and subflows could be left in a "running" state. This can cause problems with concurrency slot consumption and restarts, so we've added a service that updates the states of the children of recently cancelled flow runs.
-
-See https://github.com/PrefectHQ/prefect/pull/8126 for implementation details.
+We added cleanup of tasks and subflows belonging to cancelled flow runs. Previously, these tasks and subflows could be left in a "running" state. This can cause problems with concurrency slot consumption and restarts, so we've added a service that updates the states of the children of recently cancelled flow runs.
+
+See for implementation details.

### Multiarchitecture Docker builds

In 2.7.8, we announced that we were publishing development Docker images, including
@@ -1011,7 +5861,7 @@
You can try one of the new images by including the `--platform` specifier, e.g.:

```bash
-$ docker run --platform linux/arm64 --pull always prefecthq/prefect:2-latest prefect version
+docker run --platform linux/arm64 --pull always prefecthq/prefect:2-latest prefect version
```

We will be publishing images for the following architectures:
@@ -1022,41 +5872,47 @@
This should provide a significant speedup to anyone running containers on ARM64 machines (I'm looking at you, Apple M1 chips!) and reduce the complexity for our users who are deploying on different platforms. The workflow for building our images was rewritten from scratch, and it'll be easy for us to expand support to include other common platforms.
Shoutout to [@ddelange](https://github.com/ddelange) who led implementation of the feature. -See https://github.com/PrefectHQ/prefect/pull/7902 for details. +See for details. ### Enhancements -- Add [`is_schedule_active` option](https://docs.prefect.io/api-ref/prefect/deployments/#prefect.deployments.Deployment) to `Deployment` class to allow control of automatic scheduling — https://github.com/PrefectHQ/prefect/pull/7430 -- Add documentation links to blocks in UI — https://github.com/PrefectHQ/prefect/pull/8210 -- Add Kubernetes kube-system permissions to Prefect agent template for retrieving UUID from kube-system namespace — https://github.com/PrefectHQ/prefect/pull/8205 -- Add support for obscuring secrets in nested block fields in the UI — https://github.com/PrefectHQ/prefect/pull/8246 -- Enable publish of multiarchitecture Docker builds on release — https://github.com/PrefectHQ/prefect/pull/7902 -- Add `CANCELLING` state type — https://github.com/PrefectHQ/prefect/pull/7794 -- Add graceful shutdown of engine on `SIGTERM` — https://github.com/PrefectHQ/prefect/pull/7887 -- Add cancellation cleanup service — https://github.com/PrefectHQ/prefect/pull/8093 -- Add `PREFECT_ORION_API_KEEPALIVE_TIMEOUT` setting to allow configuration of Uvicorn `timeout-keep-alive` setting - https://github.com/PrefectHQ/prefect/pull/8190 +- Add [`is_schedule_active` option](https://docs.prefect.io/api-ref/prefect/deployments/#prefect.deployments.Deployment) to `Deployment` class to allow control of automatic scheduling — + +- Add documentation links to blocks in UI — +- Add Kubernetes kube-system permissions to Prefect agent template for retrieving UUID from kube-system namespace — +- Add support for obscuring secrets in nested block fields in the UI — +- Enable publish of multiarchitecture Docker builds on release — +- Add `CANCELLING` state type — +- Add graceful shutdown of engine on `SIGTERM` — +- Add cancellation cleanup service — +- Add `PREFECT_ORION_API_KEEPALIVE_TIMEOUT` setting to allow configuration of Uvicorn `timeout-keep-alive` setting — ### Fixes -- Fix server compatibility with clients on 2.7.8 - https://github.com/PrefectHQ/prefect/pull/8272 -- Fix tracking of long-running Kubernetes jobs and add handling for connection failures - https://github.com/PrefectHQ/prefect/pull/8189 + +- Fix server compatibility with clients on 2.7.8 — +- Fix tracking of long-running Kubernetes jobs and add handling for connection failures — ### Experimental -- Add functionality to specify a work pool when starting an agent — https://github.com/PrefectHQ/prefect/pull/8222 -- Disable `Work Queues` tab view when work pools are enabled — https://github.com/PrefectHQ/prefect/pull/8257 -- Fix property for `WorkersTable` in UI — https://github.com/PrefectHQ/prefect/pull/8232 + +- Add functionality to specify a work pool when starting an agent — +- Disable `Work Queues` tab view when work pools are enabled — +- Fix property for `WorkersTable` in UI — ### Documentation -- [Add Prefect Cloud Quickstart tutorial](https://docs.prefect.io/ui/cloud-getting-started/) — https://github.com/PrefectHQ/prefect/pull/8227 -- Add `project_urls` to `setup.py` — https://github.com/PrefectHQ/prefect/pull/8224 -- Add configuration to `mkdocs.yml` to enable versioning at a future time - https://github.com/PrefectHQ/prefect/pull/8204 -- Improve [contributing documentation](https://docs.prefect.io/contributing/overview/) with venv instructions — https://github.com/PrefectHQ/prefect/pull/8247 -- Update documentation on [KubernetesJob 
options](https://docs.prefect.io/concepts/infrastructure/#kubernetesjob) — https://github.com/PrefectHQ/prefect/pull/8261 -- Update documentation on [workspace-level roles](https://docs.prefect.io/ui/roles/#workspace-level-roles) — https://github.com/PrefectHQ/prefect/pull/8263 + +- [Add Prefect Cloud Quickstart tutorial](https://docs.prefect.io/ui/cloud-getting-started/) — +- Add `project_urls` to `setup.py` — +- Add configuration to `mkdocs.yml` to enable versioning at a future time — +- Improve [contributing documentation](https://docs.prefect.io/contributing/overview/) with venv instructions — +- Update documentation on [KubernetesJob options](https://docs.prefect.io/concepts/infrastructure/#kubernetesjob) — +- Update documentation on [workspace-level roles](https://docs.prefect.io/ui/roles/#workspace-level-roles) — ### Collections -- Add [prefect-openai](https://prefecthq.github.io/prefect-openai/) to [Collections catalog](https://docs.prefect.io/collections/catalog/) — https://github.com/PrefectHQ/prefect/pull/8236 + +- Add [prefect-openai](https://prefecthq.github.io/prefect-openai/) to [Collections catalog](https://docs.prefect.io/collections/catalog/) — ### Contributors + - @ddelange - @imsurat - @Laerte @@ -1064,36 +5920,42 @@ See https://github.com/PrefectHQ/prefect/pull/7902 for details. ## Release 2.7.9 ### Enhancements -- Add `--head` flag to `flow-run logs` CLI command to limit the number of logs returned — https://github.com/PrefectHQ/prefect/pull/8003 -- Add `--num_logs` option to `flow-run logs` CLI command to specify the number of logs returned — https://github.com/PrefectHQ/prefect/pull/8003 -- Add option to filter out `.git` files when reading files with the GitHub storage block — https://github.com/PrefectHQ/prefect/pull/8193 + +- Add `--head` flag to `flow-run logs` CLI command to limit the number of logs returned — +- Add `--num_logs` option to `flow-run logs` CLI command to specify the number of logs returned — +- Add option to filter out `.git` files when reading files with the GitHub storage block — ### Fixes -- Fix bug causing failures when spawning Windows subprocesses - https://github.com/PrefectHQ/prefect/pull/8184 -- Fix possible recursive loop when blocks label themselves as both their own parent and reference — https://github.com/PrefectHQ/prefect/pull/8197 + +- Fix bug causing failures when spawning Windows subprocesses — +- Fix possible recursive loop when blocks label themselves as both their own parent and reference — ### Documentation -- Add [recipe contribution page](https://docs.prefect.io/recipes/recipes/#contributing-recipes) and [AWS Chalice](https://docs.prefect.io/recipes/recipes/#recipe-catalog) recipe — https://github.com/PrefectHQ/prefect/pull/8183 -- Add new `discourse` and `blog` admonition types — https://github.com/PrefectHQ/prefect/pull/8202 -- Update Automations and Notifications documentation — https://github.com/PrefectHQ/prefect/pull/8140 -- Fix minor API docstring formatting issues — https://github.com/PrefectHQ/prefect/pull/8196 + +- Add [recipe contribution page](https://docs.prefect.io/recipes/recipes/#contributing-recipes) and [AWS Chalice](https://docs.prefect.io/recipes/recipes/#recipe-catalog) recipe — +- Add new `discourse` and `blog` admonition types — +- Update Automations and Notifications documentation — +- Fix minor API docstring formatting issues — ### Collections + - [`prefect-openai` 0.1.0](https://github.com/PrefectHQ/prefect-openai) newly released with support for authentication and completions ### Experimental -- 
Add ability for deployment create and deployment update to create work pool queues — https://github.com/PrefectHQ/prefect/pull/8129 + +- Add ability for deployment create and deployment update to create work pool queues — ## New Contributors -* @mj0nez made their first contribution in https://github.com/PrefectHQ/prefect/pull/8201 -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.8...2.7.9 +- @mj0nez made their first contribution in + +**All changes**: ## Release 2.7.8 ### Flow run timeline view -We're excited to announce that a new timeline graph has been added to the flow run page. +We're excited to announce that a new timeline graph has been added to the flow run page. This view helps visualize how execution of your flow run takes place in time, an alternative to the radar view that focuses on the structure of dependencies between task runs. This feature is currently in beta and we have lots of improvements planned in the near future! We're looking forward to your feedback. @@ -1101,36 +5963,42 @@ This feature is currently in beta and we have lots of improvements planned in th ![The timeline view visualizes execution of your flow run over time](https://user-images.githubusercontent.com/6200442/212138540-78586356-89bc-4401-a700-b80b15a17020.png) ### Enhancements -- Add [task option `refresh_cache`](https://docs.prefect.io/concepts/tasks/#refreshing-the-cache) to update the cached data for a task run — https://github.com/PrefectHQ/prefect/pull/7856 -- Add logs when a task run receives an abort signal and is in a non-final state — https://github.com/PrefectHQ/prefect/pull/8097 -- Add [publishing of multiarchitecture Docker images](https://hub.docker.com/r/prefecthq/prefect-dev) for development builds — https://github.com/PrefectHQ/prefect/pull/7900 -- Add `httpx.WriteError` to client retryable exceptions — https://github.com/PrefectHQ/prefect/pull/8145 -- Add support for memory limits and privileged containers to `DockerContainer` — https://github.com/PrefectHQ/prefect/pull/8033 + +- Add [task option `refresh_cache`](https://docs.prefect.io/concepts/tasks/#refreshing-the-cache) to update the cached data for a task run — +- Add logs when a task run receives an abort signal and is in a non-final state — +- Add [publishing of multiarchitecture Docker images](https://hub.docker.com/r/prefecthq/prefect-dev) for development builds — +- Add `httpx.WriteError` to client retryable exceptions — +- Add support for memory limits and privileged containers to `DockerContainer` — ### Fixes -- Add support for `allow_failure` to mapped task arguments — https://github.com/PrefectHQ/prefect/pull/8135 -- Update conda requirement regex to support channel and build hashes — https://github.com/PrefectHQ/prefect/pull/8137 -- Add numpy array support to orjson serialization — https://github.com/PrefectHQ/prefect/pull/7912 + +- Add support for `allow_failure` to mapped task arguments — +- Update conda requirement regex to support channel and build hashes — +- Add numpy array support to orjson serialization — ### Experimental -- Rename "Worker pools" to "Work pools" — https://github.com/PrefectHQ/prefect/pull/8107 -- Rename default work pool queue — https://github.com/PrefectHQ/prefect/pull/8117 -- Add worker configuration — https://github.com/PrefectHQ/prefect/pull/8100 -- Add `BaseWorker` and `ProcessWorker` — https://github.com/PrefectHQ/prefect/pull/7996 + +- Rename "Worker pools" to "Work pools" — +- Rename default work pool queue — +- Add worker configuration — +- Add `BaseWorker` and 
`ProcessWorker` — ### Documentation -- Add YouTube video to welcome page - https://github.com/PrefectHQ/prefect/pull/8090 -- Add social links - https://github.com/PrefectHQ/prefect/pull/8088 -- Increase visibility of Prefect Cloud and Orion REST API documentation - https://github.com/PrefectHQ/prefect/pull/8134 + +- Add YouTube video to welcome page — +- Add social links — +- Increase visibility of Prefect Cloud and Orion REST API documentation — ## New Contributors -* @muddi900 made their first contribution in https://github.com/PrefectHQ/prefect/pull/8101 -* @ddelange made their first contribution in https://github.com/PrefectHQ/prefect/pull/7900 -* @toro-berlin made their first contribution in https://github.com/PrefectHQ/prefect/pull/7856 -* @Ewande made their first contribution in https://github.com/PrefectHQ/prefect/pull/7912 -* @brandonreid made their first contribution in https://github.com/PrefectHQ/prefect/pull/8153 -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.7...2.7.8 +- @muddi900 made their first contribution in + +- @ddelange made their first contribution in +- @toro-berlin made their first contribution in +- @Ewande made their first contribution in +- @brandonreid made their first contribution in + +**All changes**: ## Release 2.7.7 @@ -1147,61 +6015,67 @@ Note: you can also view the REST API documentation [embedded in our open source We've also improved the parsing and rendering of reference documentation for our Python API. See the [@flow decorator reference](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.flow) for example. ### Enhancements -- Add link to blocks catalog after registering blocks in CLI — https://github.com/PrefectHQ/prefect/pull/8017 -- Add schema migration of block documents during `Block.save` — https://github.com/PrefectHQ/prefect/pull/8056 -- Update result factory creation to avoid creating an extra client instance — https://github.com/PrefectHQ/prefect/pull/8072 -- Add logs for deployment flow code loading — https://github.com/PrefectHQ/prefect/pull/8075 -- Update `visit_collection` to support annotations e.g. 
`allow_failure` — https://github.com/PrefectHQ/prefect/pull/7263 -- Update annotations to inherit from `namedtuple` for serialization support in Dask — https://github.com/PrefectHQ/prefect/pull/8037 -- Add `PREFECT_API_TLS_INSECURE_SKIP_VERIFY` setting to disable client SSL verification — https://github.com/PrefectHQ/prefect/pull/7850 -- Update OpenAPI schema for flow parameters to include positions for display — https://github.com/PrefectHQ/prefect/pull/8013 -- Add parsing of flow docstrings to populate parameter descriptions in the OpenAPI schema — https://github.com/PrefectHQ/prefect/pull/8004 -- Add `validate` to `Block.load` allowing validation to be disabled — https://github.com/PrefectHQ/prefect/pull/7862 -- Improve error message when saving a block with an invalid name — https://github.com/PrefectHQ/prefect/pull/8038 -- Add limit to task run cache key size — https://github.com/PrefectHQ/prefect/pull/7275 -- Add limit to RRule length — https://github.com/PrefectHQ/prefect/pull/7762 -- Add flow run history inside the date range picker - https://github.com/PrefectHQ/orion-design/issues/994 - -### Fixes -- Fix bug where flow timeouts started before waiting for upstreams — https://github.com/PrefectHQ/prefect/pull/7993 -- Fix captured Kubernetes error type in `get_job` — https://github.com/PrefectHQ/prefect/pull/8018 -- Fix `prefect cloud login` error when no workspaces exist — https://github.com/PrefectHQ/prefect/pull/8034 -- Fix serialization of `SecretDict` when used in deployments — https://github.com/PrefectHQ/prefect/pull/8074 -- Fix bug where `visit_collection` could fail when accessing extra Pydantic fields — https://github.com/PrefectHQ/prefect/pull/8083 + +- Add link to blocks catalog after registering blocks in CLI — +- Add schema migration of block documents during `Block.save` — +- Update result factory creation to avoid creating an extra client instance — +- Add logs for deployment flow code loading — +- Update `visit_collection` to support annotations e.g. 
`allow_failure` — +- Update annotations to inherit from `namedtuple` for serialization support in Dask — +- Add `PREFECT_API_TLS_INSECURE_SKIP_VERIFY` setting to disable client SSL verification — +- Update OpenAPI schema for flow parameters to include positions for display — +- Add parsing of flow docstrings to populate parameter descriptions in the OpenAPI schema — +- Add `validate` to `Block.load` allowing validation to be disabled — +- Improve error message when saving a block with an invalid name — +- Add limit to task run cache key size — +- Add limit to RRule length — +- Add flow run history inside the date range picker — + +### Fixes + +- Fix bug where flow timeouts started before waiting for upstreams — +- Fix captured Kubernetes error type in `get_job` — +- Fix `prefect cloud login` error when no workspaces exist — +- Fix serialization of `SecretDict` when used in deployments — +- Fix bug where `visit_collection` could fail when accessing extra Pydantic fields — ### Experimental -- Add pages and routers for workers — https://github.com/PrefectHQ/prefect/pull/7973 + +- Add pages and routers for workers — ### Documentation -- Update API reference documentation to use new parser and renderer — https://github.com/PrefectHQ/prefect/pull/7855 -- Add new REST API reference using Redoc — https://github.com/PrefectHQ/prefect/pull/7503 + +- Update API reference documentation to use new parser and renderer — +- Add new REST API reference using Redoc — ### Collections + - [`prefect-aws` 0.2.2](https://github.com/PrefectHQ/prefect-aws/releases/tag/v0.2.2) released with many improvements to `S3Bucket` ### Contributors -* @j-tr made their first contribution in https://github.com/PrefectHQ/prefect/pull/8013 -* @toby-coleman made their first contribution in https://github.com/PrefectHQ/prefect/pull/8083 -* @riquelmev made their first contribution in https://github.com/PrefectHQ/prefect/pull/7768 -* @joelluijmes +- @j-tr made their first contribution in -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.5...2.7.7 +- @toby-coleman made their first contribution in +- @riquelmev made their first contribution in +- @joelluijmes +**All changes**: ## Release 2.7.6 This release fixes a critical bug in the SQLite database migrations in 2.7.4 and 2.7.5. -See https://github.com/PrefectHQ/prefect/issues/8058 for details. +See for details. -**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.5...2.7.6 +**All changes**: ## Release 2.7.5 ### Schedule flow runs and read logs from the CLI You can now specify either `--start-in` or `--start-at` when running deployments from the CLI. + ``` ❯ prefect deployment run foo/test --start-at "3pm tomorrow" Creating flow run for deployment 'foo/test'... @@ -1213,38 +6087,44 @@ Created flow run 'pompous-porpoise'. 
```

You can also get the logs for a flow run using `prefect flow-run logs <flow run id>`

+
```
-❯ prefect flow-run logs 7aec7a60-a0ab-4f3e-9f2a-479cd85a2aaf
-2022-12-29 20:00:40.651 | INFO | Flow run 'optimal-pegasus' - meow
-2022-12-29 20:00:40.652 | INFO | Flow run 'optimal-pegasus' - that food in my bowl is gross
-2022-12-29 20:00:40.652 | WARNING | Flow run 'optimal-pegasus' - seriously, it needs to be replaced ASAP
-2022-12-29 20:00:40.662 | INFO | Flow run 'optimal-pegasus' - Finished in state Completed()
+❯ prefect flow-run logs 7aec7a60-a0ab-4f3e-9f2a-479cd85a2aaf
+2022-12-29 20:00:40.651 | INFO | Flow run 'optimal-pegasus' - meow
+2022-12-29 20:00:40.652 | INFO | Flow run 'optimal-pegasus' - that food in my bowl is gross
+2022-12-29 20:00:40.652 | WARNING | Flow run 'optimal-pegasus' - seriously, it needs to be replaced ASAP
+2022-12-29 20:00:40.662 | INFO | Flow run 'optimal-pegasus' - Finished in state Completed()
```

### Enhancements
-- Add `--start-in` and `--start-at` to `prefect deployment run` — https://github.com/PrefectHQ/prefect/pull/7772
-- Add `flow-run logs` to get logs using the CLI — https://github.com/PrefectHQ/prefect/pull/7982
+
+- Add `--start-in` and `--start-at` to `prefect deployment run` — <https://github.com/PrefectHQ/prefect/pull/7772>
+- Add `flow-run logs` to get logs using the CLI — <https://github.com/PrefectHQ/prefect/pull/7982>

### Documentation
-- Fix task annotation in task runner docs — https://github.com/PrefectHQ/prefect/pull/7977
-- Add instructions for building custom blocks — https://github.com/PrefectHQ/prefect/pull/7979
+
+- Fix task annotation in task runner docs — <https://github.com/PrefectHQ/prefect/pull/7977>
+- Add instructions for building custom blocks — <https://github.com/PrefectHQ/prefect/pull/7979>

### Collections
+
- Added `BigQueryWarehouse` block in `prefect-gcp` v0.2.1
- Added `AirbyteConnection` block in `prefect-airbyte` v0.2.0
- Added dbt Cloud metadata API client to `DbtCloudCredentials` in `prefect-dbt` v0.2.7

-### Experimental
-- Fix read worker pool queue endpoint — https://github.com/PrefectHQ/prefect/pull/7995
-- Fix error in worker pool queue endpoint — https://github.com/PrefectHQ/prefect/pull/7997
-- Add filtering to flow runs by worker pool and worker pool queue attributes — https://github.com/PrefectHQ/prefect/pull/8006
+### Experimental
+
+- Fix read worker pool queue endpoint — <https://github.com/PrefectHQ/prefect/pull/7995>
+- Fix error in worker pool queue endpoint — <https://github.com/PrefectHQ/prefect/pull/7997>
+- Add filtering to flow runs by worker pool and worker pool queue attributes — <https://github.com/PrefectHQ/prefect/pull/8006>

### Contributors
-* @ohadch made their first contribution in https://github.com/PrefectHQ/prefect/pull/7982
-* @mohitsaxenaknoldus made their first contribution in https://github.com/PrefectHQ/prefect/pull/7980

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.4...2.7.5
+- @ohadch made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7982>
+
+- @mohitsaxenaknoldus made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7980>

+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.7.4...2.7.5>

## Release 2.7.4

@@ -1295,41 +6175,49 @@ def flaky_function():
    return 42
```

-See https://github.com/PrefectHQ/prefect/pull/7961 for implementation details.
+See <https://github.com/PrefectHQ/prefect/pull/7961> for implementation details.
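+
+The `flaky_function` example above appears to demonstrate configurable task retries. As a
+hedged sketch of that surface (assuming Prefect 2's documented `retries` and
+`retry_delay_seconds` task options and the `exponential_backoff` utility from
+`prefect.tasks`; `also_flaky` is an illustrative name, not from the original notes):
+
+```
+from prefect import task
+from prefect.tasks import exponential_backoff
+
+# exponential_backoff(backoff_factor=10) generates one delay per retry,
+# doubling each time: 10, 20, and 40 seconds for three retries.
+@task(retries=3, retry_delay_seconds=exponential_backoff(backoff_factor=10))
+def flaky_function():
+    return 42
+
+# Alternatively, pass an explicit list with one delay per retry attempt.
+@task(retries=3, retry_delay_seconds=[1, 10, 100])
+def also_flaky():
+    return 42
+```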
### Enhancements
-- Add task run names to the `/graph` API route — https://github.com/PrefectHQ/prefect/pull/7951
-- Add vcs directories `.git` and `.hg` (mercurial) to default `.prefectignore` — https://github.com/PrefectHQ/prefect/pull/7919
-- Increase the default thread limit from 40 to 250 — https://github.com/PrefectHQ/prefect/pull/7961
+
+- Add task run names to the `/graph` API route — <https://github.com/PrefectHQ/prefect/pull/7951>
+- Add vcs directories `.git` and `.hg` (mercurial) to default `.prefectignore` — <https://github.com/PrefectHQ/prefect/pull/7919>
+- Increase the default thread limit from 40 to 250 — <https://github.com/PrefectHQ/prefect/pull/7961>

### Deprecations
-- Add removal date to tag-based work queue deprecation messages — https://github.com/PrefectHQ/prefect/pull/7930
+
+- Add removal date to tag-based work queue deprecation messages — <https://github.com/PrefectHQ/prefect/pull/7930>

### Documentation
-- Fix `prefect deployment` command listing — https://github.com/PrefectHQ/prefect/pull/7949
-- Add workspace transfer documentation — https://github.com/PrefectHQ/prefect/pull/7941
-- Fix docstring examples in `PrefectFuture` — https://github.com/PrefectHQ/prefect/pull/7877
-- Update `setup.py` metadata to link to correct repo — https://github.com/PrefectHQ/prefect/pull/7933
+
+- Fix `prefect deployment` command listing — <https://github.com/PrefectHQ/prefect/pull/7949>
+- Add workspace transfer documentation — <https://github.com/PrefectHQ/prefect/pull/7941>
+- Fix docstring examples in `PrefectFuture` — <https://github.com/PrefectHQ/prefect/pull/7877>
+- Update `setup.py` metadata to link to correct repo — <https://github.com/PrefectHQ/prefect/pull/7933>

### Experimental
-- Add experimental workers API routes — https://github.com/PrefectHQ/prefect/pull/7896
+
+- Add experimental workers API routes — <https://github.com/PrefectHQ/prefect/pull/7896>

### Collections
+
- New [`prefect-google-sheets` collection](https://stefanocascavilla.github.io/prefect-google-sheets/)

### Contributors
-* @devanshdoshi9 made their first contribution in https://github.com/PrefectHQ/prefect/pull/7949
-* @stefanocascavilla made their first contribution in https://github.com/PrefectHQ/prefect/pull/7960
-* @quassy made their first contribution in https://github.com/PrefectHQ/prefect/pull/7919

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.3...2.7.4
+- @devanshdoshi9 made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7949>
+
+- @stefanocascavilla made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7960>
+- @quassy made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7919>
+
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.7.3...2.7.4>

## Release 2.7.3

### Fixes
-- Fix bug where flows with names that do not match the function name could not be loaded — https://github.com/PrefectHQ/prefect/pull/7920
-- Fix type annotation for `KubernetesJob.job_watch_timeout_seconds` — https://github.com/PrefectHQ/prefect/pull/7914
-- Keep data from being lost when assigning a generator to `State.data` — https://github.com/PrefectHQ/prefect/pull/7714
+
+- Fix bug where flows with names that do not match the function name could not be loaded — <https://github.com/PrefectHQ/prefect/pull/7920>
+- Fix type annotation for `KubernetesJob.job_watch_timeout_seconds` — <https://github.com/PrefectHQ/prefect/pull/7914>
+- Keep data from being lost when assigning a generator to `State.data` — <https://github.com/PrefectHQ/prefect/pull/7714>

## Release 2.7.2

@@ -1379,72 +6267,82 @@ When viewing task run concurrency in the UI, each limit has its own page.
(screenshot of an individual task run concurrency limit page)

-
### Enhancements
-- Improve Prefect import time by deferring imports — https://github.com/PrefectHQ/prefect/pull/7836
-- Add Opsgenie notification block — https://github.com/PrefectHQ/prefect/pull/7778
-- Add individual concurrency limit page with active runs list — https://github.com/PrefectHQ/prefect/pull/7848
-- Add `PREFECT_KUBERNETES_CLUSTER_UID` to allow bypass of `kube-system` namespace read — https://github.com/PrefectHQ/prefect/pull/7864
-- Refactor `pause_flow_run` for consistency with engine state handling — https://github.com/PrefectHQ/prefect/pull/7857
-- API: Allow `reject_transition` to return current state — https://github.com/PrefectHQ/prefect/pull/7830
-- Add `SecretDict` block field that obfuscates nested values in a dictionary — https://github.com/PrefectHQ/prefect/pull/7885
+
+- Improve Prefect import time by deferring imports — <https://github.com/PrefectHQ/prefect/pull/7836>
+- Add Opsgenie notification block — <https://github.com/PrefectHQ/prefect/pull/7778>
+- Add individual concurrency limit page with active runs list — <https://github.com/PrefectHQ/prefect/pull/7848>
+- Add `PREFECT_KUBERNETES_CLUSTER_UID` to allow bypass of `kube-system` namespace read — <https://github.com/PrefectHQ/prefect/pull/7864>
+- Refactor `pause_flow_run` for consistency with engine state handling — <https://github.com/PrefectHQ/prefect/pull/7857>
+- API: Allow `reject_transition` to return current state — <https://github.com/PrefectHQ/prefect/pull/7830>
+- Add `SecretDict` block field that obfuscates nested values in a dictionary — <https://github.com/PrefectHQ/prefect/pull/7885>

### Fixes
-- Fix bug where agent concurrency slots may not be released — https://github.com/PrefectHQ/prefect/pull/7845
-- Fix circular imports in the `orchestration` module — https://github.com/PrefectHQ/prefect/pull/7883
-- Fix deployment builds with scripts that contain flow calls - https://github.com/PrefectHQ/prefect/pull/7817
-- Fix path argument behavior in `LocalFileSystem` block - https://github.com/PrefectHQ/prefect/pull/7891
-- Fix flow cancellation in `Process` block on Windows - https://github.com/PrefectHQ/prefect/pull/7799
+
+- Fix bug where agent concurrency slots may not be released — <https://github.com/PrefectHQ/prefect/pull/7845>
+- Fix circular imports in the `orchestration` module — <https://github.com/PrefectHQ/prefect/pull/7883>
+- Fix deployment builds with scripts that contain flow calls — <https://github.com/PrefectHQ/prefect/pull/7817>
+- Fix path argument behavior in `LocalFileSystem` block — <https://github.com/PrefectHQ/prefect/pull/7891>
+- Fix flow cancellation in `Process` block on Windows — <https://github.com/PrefectHQ/prefect/pull/7799>

### Documentation
-- Add documentation for Automations UI — https://github.com/PrefectHQ/prefect/pull/7833
-- Mention recipes and tutorials under Recipes and Collections pages — https://github.com/PrefectHQ/prefect/pull/7876
-- Add documentation for Task Run Concurrency UI — https://github.com/PrefectHQ/prefect/pull/7840
-- Add `with_options` example to collections usage docs — https://github.com/PrefectHQ/prefect/pull/7894
-- Add a link to orion design and better title to UI readme — https://github.com/PrefectHQ/prefect/pull/7484
+
+- Add documentation for Automations UI — <https://github.com/PrefectHQ/prefect/pull/7833>
+- Mention recipes and tutorials under Recipes and Collections pages — <https://github.com/PrefectHQ/prefect/pull/7876>
+- Add documentation for Task Run Concurrency UI — <https://github.com/PrefectHQ/prefect/pull/7840>
+- Add `with_options` example to collections usage docs — <https://github.com/PrefectHQ/prefect/pull/7894>
+- Add a link to orion design and better title to UI readme — <https://github.com/PrefectHQ/prefect/pull/7484>

### Collections
-- New [`prefect-kubernetes`](https://prefecthq.github.io/prefect-kubernetes/) collection for [Kubernetes](https://kubernetes.io/) — https://github.com/PrefectHQ/prefect/pull/7907
-- New [`prefect-bitbucket`](https://prefecthq.github.io/prefect-bitbucket/) collection for [Bitbucket](https://bitbucket.org/product) — https://github.com/PrefectHQ/prefect/pull/7907
+
+- New [`prefect-kubernetes`](https://prefecthq.github.io/prefect-kubernetes/) collection for [Kubernetes](https://kubernetes.io/) — <https://github.com/PrefectHQ/prefect/pull/7907>
+- New [`prefect-bitbucket`](https://prefecthq.github.io/prefect-bitbucket/) collection
for [Bitbucket](https://bitbucket.org/product) — <https://github.com/PrefectHQ/prefect/pull/7907>

## Contributors
+
- @jlutran

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.7.1...2.7.2
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.7.1...2.7.2>

## Release 2.7.1

### Task concurrency limits page
+
You can now add task concurrency limits in the UI!

![image](https://user-images.githubusercontent.com/6200442/206586749-3f9fff36-5359-41a9-8727-60523cf89071.png)

### Enhancements
-- Add extra entrypoints setting for user module injection; allows registration of custom blocks — https://github.com/PrefectHQ/prefect/pull/7179
-- Update orchestration rule to wait for scheduled time to only apply to transition to running — https://github.com/PrefectHQ/prefect/pull/7585
-- Use cluster UID and namespace instead of cluster "name" for `KubernetesJob` identifiers — https://github.com/PrefectHQ/prefect/pull/7747
-- Add a task run concurrency limits page — https://github.com/PrefectHQ/prefect/pull/7779
-- Add setting to toggle interpreting square brackets as style — https://github.com/PrefectHQ/prefect/pull/7810
-- Move `/health` API route to root router — https://github.com/PrefectHQ/prefect/pull/7765
-- Add `PREFECT_API_ENABLE_HTTP2` setting to allow HTTP/2 to be disabled — https://github.com/PrefectHQ/prefect/pull/7802
-- Monitor process after kill and return early when possible — https://github.com/PrefectHQ/prefect/pull/7746
-- Update `KubernetesJob` to watch jobs without timeout by default — https://github.com/PrefectHQ/prefect/pull/7786
-- Bulk deletion of flows, deployments, and work queues from the UI - https://github.com/PrefectHQ/prefect/pull/7824
+
+- Add extra entrypoints setting for user module injection; allows registration of custom blocks — <https://github.com/PrefectHQ/prefect/pull/7179>
+- Update orchestration rule to wait for scheduled time to only apply to transition to running — <https://github.com/PrefectHQ/prefect/pull/7585>
+- Use cluster UID and namespace instead of cluster "name" for `KubernetesJob` identifiers — <https://github.com/PrefectHQ/prefect/pull/7747>
+- Add a task run concurrency limits page — <https://github.com/PrefectHQ/prefect/pull/7779>
+- Add setting to toggle interpreting square brackets as style — <https://github.com/PrefectHQ/prefect/pull/7810>
+- Move `/health` API route to root router — <https://github.com/PrefectHQ/prefect/pull/7765>
+- Add `PREFECT_API_ENABLE_HTTP2` setting to allow HTTP/2 to be disabled — <https://github.com/PrefectHQ/prefect/pull/7802>
+- Monitor process after kill and return early when possible — <https://github.com/PrefectHQ/prefect/pull/7746>
+- Update `KubernetesJob` to watch jobs without timeout by default — <https://github.com/PrefectHQ/prefect/pull/7786>
+- Bulk deletion of flows, deployments, and work queues from the UI — <https://github.com/PrefectHQ/prefect/pull/7824>

### Fixes
-- Add lock to ensure that alembic commands are not run concurrently — https://github.com/PrefectHQ/prefect/pull/7789
-- Release task concurrency slots when transition is rejected as long as the task is not in a running state — https://github.com/PrefectHQ/prefect/pull/7798
-- Fix issue with improperly parsed flow run notification URLs — https://github.com/PrefectHQ/prefect/pull/7173
-- Fix radar not updating without refreshing the page - https://github.com/PrefectHQ/prefect/pull/7824
-- UI: Fullscreen layouts on screens < `lg` should take up all the available space — https://github.com/PrefectHQ/prefect/pull/7792
+
+- Add lock to ensure that alembic commands are not run concurrently — <https://github.com/PrefectHQ/prefect/pull/7789>
+- Release task concurrency slots when transition is rejected as long as the task is not in a running state — <https://github.com/PrefectHQ/prefect/pull/7798>
+- Fix issue with improperly parsed flow run notification URLs — <https://github.com/PrefectHQ/prefect/pull/7173>
+- Fix radar not updating without refreshing the page — <https://github.com/PrefectHQ/prefect/pull/7824>
+- UI: Fullscreen layouts on screens < `lg` should take up all the available space — <https://github.com/PrefectHQ/prefect/pull/7792>

### Documentation
-- Add documentation for creating a flow run from deployments — https://github.com/PrefectHQ/prefect/pull/7696
-- Move `wait_for` examples to the tasks documentation —
https://github.com/PrefectHQ/prefect/pull/7788
+
+- Add documentation for creating a flow run from deployments — <https://github.com/PrefectHQ/prefect/pull/7696>
+- Move `wait_for` examples to the tasks documentation — <https://github.com/PrefectHQ/prefect/pull/7788>

## Contributors
-* @t-yuki made their first contribution in https://github.com/PrefectHQ/prefect/pull/7741
-* @padbk made their first contribution in https://github.com/PrefectHQ/prefect/pull/7173
+
+- @t-yuki made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7741>
+
+- @padbk made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7173>

## Release 2.7.0

@@ -1452,7 +6350,7 @@ You can now add task concurrency limits in the UI!

We're excited to announce a new flow run cancellation feature!

-Flow runs can be cancelled from the CLI, UI, REST API, or Python client.
+Flow runs can be cancelled from the CLI, UI, REST API, or Python client.

For example:

@@ -1462,25 +6360,25 @@ prefect flow-run cancel

When cancellation is requested, the flow run is moved to a "Cancelling" state. The agent monitors the state of flow runs and detects that cancellation has been requested. The agent then sends a signal to the flow run infrastructure, requesting termination of the run. If the run does not terminate after a grace period (default of 30 seconds), the infrastructure will be killed, ensuring the flow run exits.

-Unlike the implementation of cancellation in Prefect 1 — which could fail if the flow run was stuck — this provides a strong guarantee of cancellation.
+Unlike the implementation of cancellation in Prefect 1 — which could fail if the flow run was stuck — this provides a strong guarantee of cancellation.

Note: this process is robust to agent restarts, but does require that an agent is running to enforce cancellation.

Support for cancellation has been added to all core library infrastructure types:

-- Docker Containers (https://github.com/PrefectHQ/prefect/pull/7684)
-- Kubernetes Jobs (https://github.com/PrefectHQ/prefect/pull/7701)
-- Processes (https://github.com/PrefectHQ/prefect/pull/7635)
+- Docker Containers (<https://github.com/PrefectHQ/prefect/pull/7684>)
+- Kubernetes Jobs (<https://github.com/PrefectHQ/prefect/pull/7701>)
+- Processes (<https://github.com/PrefectHQ/prefect/pull/7635>)

Cancellation support is in progress for all collection infrastructure types:

-- ECS Tasks (https://github.com/PrefectHQ/prefect-aws/pull/163)
-- Google Cloud Run Jobs (https://github.com/PrefectHQ/prefect-gcp/pull/76)
-- Azure Container Instances (https://github.com/PrefectHQ/prefect-azure/pull/58)
+- ECS Tasks (<https://github.com/PrefectHQ/prefect-aws/pull/163>)
+- Google Cloud Run Jobs (<https://github.com/PrefectHQ/prefect-gcp/pull/76>)
+- Azure Container Instances (<https://github.com/PrefectHQ/prefect-azure/pull/58>)

At this time, this feature requires the flow run to be submitted by an agent — flow runs without deployments cannot be cancelled yet, but that feature is [coming soon](https://github.com/PrefectHQ/prefect/pull/7150).

-See https://github.com/PrefectHQ/prefect/pull/7637 for more details
+See <https://github.com/PrefectHQ/prefect/pull/7637> for more details

### Flow run pause and resume

@@ -1503,7 +6401,7 @@ A timeout can be supplied to the `pause_flow_run` utility — if the flow run is

This blocking style of pause that keeps infrastructure running is supported for all flow runs, including subflow runs.

-See https://github.com/PrefectHQ/prefect/pull/7637 for more details.
+See <https://github.com/PrefectHQ/prefect/pull/7637> for more details.

### Logging of prints in flows and tasks

@@ -1540,8 +6438,7 @@ This print statement will appear locally as normal, but won't be sent to the Prefect API.

See [the logging documentation](https://docs.prefect.io/concepts/logs/#logging-print-statements) for more details.

-See https://github.com/PrefectHQ/prefect/pull/7580 for implementation details.
-
+See <https://github.com/PrefectHQ/prefect/pull/7580> for implementation details.
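+
+Since `log_prints` is described above only in prose, here is a minimal sketch of
+the opt-in and opt-out behavior (the flow and task names are illustrative):
+
+```
+from prefect import flow, task
+
+@task
+def captured():
+    # With log_prints enabled on the enclosing flow, this print is redirected
+    # to the Prefect logger and sent to the API as an INFO-level log.
+    print("hello from a task")
+
+@task(log_prints=False)
+def opted_out():
+    # Explicitly opting out: this print appears locally as normal,
+    # but is not sent to the Prefect API.
+    print("this stays local")
+
+@flow(log_prints=True)
+def my_flow():
+    captured()
+    opted_out()
+```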
### Agent flow run concurrency limits @@ -1559,53 +6456,56 @@ This feature is especially useful for limiting resource consumption when running Thanks to @eudyptula for contributing! -See https://github.com/PrefectHQ/prefect/pull/7361 for more details. - - -### Enhancements -- Add agent reporting of crashed flow run infrastructure — https://github.com/PrefectHQ/prefect/pull/7670 -- Add Twilio SMS notification block — https://github.com/PrefectHQ/prefect/pull/7685 -- Add PagerDuty Webhook notification block — https://github.com/PrefectHQ/prefect/pull/7534 -- Add jitter to the agent query loop — https://github.com/PrefectHQ/prefect/pull/7652 -- Include final state logs in logs sent to API — https://github.com/PrefectHQ/prefect/pull/7647 -- Add `tags` and `idempotency_key` to `run deployment` — https://github.com/PrefectHQ/prefect/pull/7641 -- The final state of a flow is now `Cancelled` when any task finishes in a `Cancelled` state — https://github.com/PrefectHQ/prefect/pull/7694 -- Update login to prompt for "API key" instead of "authentication key" — https://github.com/PrefectHQ/prefect/pull/7649 -- Disable cache on result retrieval if disabled on creation — https://github.com/PrefectHQ/prefect/pull/7627 -- Raise `CancelledRun` when retrieving a `Cancelled` state's result — https://github.com/PrefectHQ/prefect/pull/7699 -- Use new database session to send each flow run notification — https://github.com/PrefectHQ/prefect/pull/7644 -- Increase default agent query interval to 10s — https://github.com/PrefectHQ/prefect/pull/7703 -- Add default messages to state exceptions — https://github.com/PrefectHQ/prefect/pull/7705 -- Update `run_sync_in_interruptible_worker_thread` to use an event — https://github.com/PrefectHQ/prefect/pull/7704 -- Increase default database query timeout to 10s — https://github.com/PrefectHQ/prefect/pull/7717 - -### Fixes -- Prompt workspace selection if API key is set, but API URL is not set — https://github.com/PrefectHQ/prefect/pull/7648 -- Use `PREFECT_UI_URL` for flow run notifications — https://github.com/PrefectHQ/prefect/pull/7698 -- Display all parameter values a flow run was triggered with in the UI (defaults and overrides) — https://github.com/PrefectHQ/prefect/pull/7697 -- Fix bug where result event is missing when wait is called before submission completes — https://github.com/PrefectHQ/prefect/pull/7571 -- Fix support for sync-compatible calls in `deployment build` — https://github.com/PrefectHQ/prefect/pull/7417 -- Fix bug in `StateGroup` that caused `all_final` to be wrong — https://github.com/PrefectHQ/prefect/pull/7678 -- Add retry on specified httpx network errors — https://github.com/PrefectHQ/prefect/pull/7593 -- Fix state display bug when state message is empty — https://github.com/PrefectHQ/prefect/pull/7706 - -### Documentation -- Fix heading links in docs — https://github.com/PrefectHQ/prefect/pull/7665 -- Update login and `PREFECT_API_URL` configuration notes — https://github.com/PrefectHQ/prefect/pull/7674 -- Add documentation about AWS retries configuration — https://github.com/PrefectHQ/prefect/pull/7691 -- Add GitLab storage block to deployment CLI docs — https://github.com/PrefectHQ/prefect/pull/7686 -- Add links to Cloud Run and Container Instance infrastructure — https://github.com/PrefectHQ/prefect/pull/7690 -- Update docs on final state determination to reflect `Cancelled` state changes — https://github.com/PrefectHQ/prefect/pull/7700 -- Fix link in 'Agents and Work Queues' documentation — https://github.com/PrefectHQ/prefect/pull/7659 - 
-### Contributors
-- @brian-pond made their first contribution in https://github.com/PrefectHQ/prefect/pull/7659
-- @YtKC made their first contribution in https://github.com/PrefectHQ/prefect/pull/7641
-- @eudyptula made their first contribution in https://github.com/PrefectHQ/prefect/pull/7361
+See <https://github.com/PrefectHQ/prefect/pull/7361> for more details.
+
+### Enhancements
+
+- Add agent reporting of crashed flow run infrastructure — <https://github.com/PrefectHQ/prefect/pull/7670>
+- Add Twilio SMS notification block — <https://github.com/PrefectHQ/prefect/pull/7685>
+- Add PagerDuty Webhook notification block — <https://github.com/PrefectHQ/prefect/pull/7534>
+- Add jitter to the agent query loop — <https://github.com/PrefectHQ/prefect/pull/7652>
+- Include final state logs in logs sent to API — <https://github.com/PrefectHQ/prefect/pull/7647>
+- Add `tags` and `idempotency_key` to `run deployment` — <https://github.com/PrefectHQ/prefect/pull/7641>
+- The final state of a flow is now `Cancelled` when any task finishes in a `Cancelled` state — <https://github.com/PrefectHQ/prefect/pull/7694>
+- Update login to prompt for "API key" instead of "authentication key" — <https://github.com/PrefectHQ/prefect/pull/7649>
+- Disable cache on result retrieval if disabled on creation — <https://github.com/PrefectHQ/prefect/pull/7627>
+- Raise `CancelledRun` when retrieving a `Cancelled` state's result — <https://github.com/PrefectHQ/prefect/pull/7699>
+- Use new database session to send each flow run notification — <https://github.com/PrefectHQ/prefect/pull/7644>
+- Increase default agent query interval to 10s — <https://github.com/PrefectHQ/prefect/pull/7703>
+- Add default messages to state exceptions — <https://github.com/PrefectHQ/prefect/pull/7705>
+- Update `run_sync_in_interruptible_worker_thread` to use an event — <https://github.com/PrefectHQ/prefect/pull/7704>
+- Increase default database query timeout to 10s — <https://github.com/PrefectHQ/prefect/pull/7717>
+
+### Fixes
+
+- Prompt workspace selection if API key is set, but API URL is not set — <https://github.com/PrefectHQ/prefect/pull/7648>
+- Use `PREFECT_UI_URL` for flow run notifications — <https://github.com/PrefectHQ/prefect/pull/7698>
+- Display all parameter values a flow run was triggered with in the UI (defaults and overrides) — <https://github.com/PrefectHQ/prefect/pull/7697>
+- Fix bug where result event is missing when wait is called before submission completes — <https://github.com/PrefectHQ/prefect/pull/7571>
+- Fix support for sync-compatible calls in `deployment build` — <https://github.com/PrefectHQ/prefect/pull/7417>
+- Fix bug in `StateGroup` that caused `all_final` to be wrong — <https://github.com/PrefectHQ/prefect/pull/7678>
+- Add retry on specified httpx network errors — <https://github.com/PrefectHQ/prefect/pull/7593>
+- Fix state display bug when state message is empty — <https://github.com/PrefectHQ/prefect/pull/7706>
+
+### Documentation
+
+- Fix heading links in docs — <https://github.com/PrefectHQ/prefect/pull/7665>
+- Update login and `PREFECT_API_URL` configuration notes — <https://github.com/PrefectHQ/prefect/pull/7674>
+- Add documentation about AWS retries configuration — <https://github.com/PrefectHQ/prefect/pull/7691>
+- Add GitLab storage block to deployment CLI docs — <https://github.com/PrefectHQ/prefect/pull/7686>
+- Add links to Cloud Run and Container Instance infrastructure — <https://github.com/PrefectHQ/prefect/pull/7690>
+- Update docs on final state determination to reflect `Cancelled` state changes — <https://github.com/PrefectHQ/prefect/pull/7700>
+- Fix link in 'Agents and Work Queues' documentation — <https://github.com/PrefectHQ/prefect/pull/7659>
+
+### Contributors
+
+- @brian-pond made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7659>
+- @YtKC made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7641>
+- @eudyptula made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7361>
- @hateyouinfinity
- @jmrobbins13

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.9...2.7.0
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.9...2.7.0>

## Release 2.6.9

@@ -1631,235 +6531,268 @@ It also detects existing authentication:

There's also a new `prefect cloud logout` command (contributed by @hallenmaia) to remove credentials from the current profile.
### Enhancements
-- Add automatic upper-casing of string log level settings — https://github.com/PrefectHQ/prefect/pull/7592
-- Add `infrastructure_pid` to flow run — https://github.com/PrefectHQ/prefect/pull/7595
-- Add `PrefectFormatter` to reduce logging configuration duplication — https://github.com/PrefectHQ/prefect/pull/7588
-- Update `CloudClient.read_workspaces` to return a model — https://github.com/PrefectHQ/prefect/pull/7332
-- Update hashing utilities to allow execution in FIPS 140-2 environments — https://github.com/PrefectHQ/prefect/pull/7620
+
+- Add automatic upper-casing of string log level settings — <https://github.com/PrefectHQ/prefect/pull/7592>
+- Add `infrastructure_pid` to flow run — <https://github.com/PrefectHQ/prefect/pull/7595>
+- Add `PrefectFormatter` to reduce logging configuration duplication — <https://github.com/PrefectHQ/prefect/pull/7588>
+- Update `CloudClient.read_workspaces` to return a model — <https://github.com/PrefectHQ/prefect/pull/7332>
+- Update hashing utilities to allow execution in FIPS 140-2 environments — <https://github.com/PrefectHQ/prefect/pull/7620>

### Fixes
-- Update logging setup to support incremental configuration — https://github.com/PrefectHQ/prefect/pull/7569
-- Update logging `JsonFormatter` to output valid JSON — https://github.com/PrefectHQ/prefect/pull/7567
-- Remove `inter` CSS import, which blocked UI loads in air-gapped environments — https://github.com/PrefectHQ/prefect/pull/7586
-- Return 404 when a flow run is missing during `set_task_run_state` — https://github.com/PrefectHQ/prefect/pull/7603
-- Fix directory copy errors with `LocalFileSystem` deployments on Python 3.7 — https://github.com/PrefectHQ/prefect/pull/7441
-- Add flush of task run logs when on remote workers — https://github.com/PrefectHQ/prefect/pull/7626
+
+- Update logging setup to support incremental configuration — <https://github.com/PrefectHQ/prefect/pull/7569>
+- Update logging `JsonFormatter` to output valid JSON — <https://github.com/PrefectHQ/prefect/pull/7567>
+- Remove `inter` CSS import, which blocked UI loads in air-gapped environments — <https://github.com/PrefectHQ/prefect/pull/7586>
+- Return 404 when a flow run is missing during `set_task_run_state` — <https://github.com/PrefectHQ/prefect/pull/7603>
+- Fix directory copy errors with `LocalFileSystem` deployments on Python 3.7 — <https://github.com/PrefectHQ/prefect/pull/7441>
+- Add flush of task run logs when on remote workers — <https://github.com/PrefectHQ/prefect/pull/7626>

### Documentation
-- Add docs about CPU and memory allocation on agent deploying ECS infrastructure blocks — https://github.com/PrefectHQ/prefect/pull/7597
+
+- Add docs about CPU and memory allocation on agent deploying ECS infrastructure blocks — <https://github.com/PrefectHQ/prefect/pull/7597>

### Contributors
-- @hallenmaia
+
+- @hallenmaia
- @szelenka

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.8...2.6.9
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.8...2.6.9>

## Release 2.6.8

### Enhancements
-- Add `--run-once` to `prefect agent start` CLI — https://github.com/PrefectHQ/prefect/pull/7505
-- Expose `prefetch-seconds` in `prefect agent start` CLI — https://github.com/PrefectHQ/prefect/pull/7498
-- Add start time sort for flow runs to the REST API — https://github.com/PrefectHQ/prefect/pull/7496
-- Add `merge_existing_data` flag to `update_block_document` — https://github.com/PrefectHQ/prefect/pull/7470
-- Add sanitization to enforce leading/trailing alphanumeric characters for Kubernetes job labels — https://github.com/PrefectHQ/prefect/pull/7528
+
+- Add `--run-once` to `prefect agent start` CLI — <https://github.com/PrefectHQ/prefect/pull/7505>
+- Expose `prefetch-seconds` in `prefect agent start` CLI — <https://github.com/PrefectHQ/prefect/pull/7498>
+- Add start time sort for flow runs to the REST API — <https://github.com/PrefectHQ/prefect/pull/7496>
+- Add `merge_existing_data` flag to `update_block_document` — <https://github.com/PrefectHQ/prefect/pull/7470>
+- Add sanitization to enforce leading/trailing alphanumeric characters for Kubernetes job labels — <https://github.com/PrefectHQ/prefect/pull/7528>

### Fixes
-- Fix type checking for flow name and version arguments — https://github.com/PrefectHQ/prefect/pull/7549
-- Fix check for empty paths in `LocalFileSystem` — https://github.com/PrefectHQ/prefect/pull/7477
-- Fix
`PrefectConsoleHandler` bug where log tracebacks were excluded — https://github.com/PrefectHQ/prefect/pull/7558
+
+- Fix type checking for flow name and version arguments — <https://github.com/PrefectHQ/prefect/pull/7549>
+- Fix check for empty paths in `LocalFileSystem` — <https://github.com/PrefectHQ/prefect/pull/7477>
+- Fix `PrefectConsoleHandler` bug where log tracebacks were excluded — <https://github.com/PrefectHQ/prefect/pull/7558>

### Documentation
-- Add glow to Collection Catalog images in dark mode — https://github.com/PrefectHQ/prefect/pull/7535
+
+- Add glow to Collection Catalog images in dark mode — <https://github.com/PrefectHQ/prefect/pull/7535>
- New [`prefect-vault`](https://github.com/pbchekin/prefect-vault) collection for integration with Hashicorp Vault

## Contributors
-* @kielnino made their first contribution in https://github.com/PrefectHQ/prefect/pull/7517
-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.7...2.6.8
+- @kielnino made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7517>
+
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.7...2.6.8>

## Release 2.6.7

### Enhancements
-- Add timeout support to tasks — https://github.com/PrefectHQ/prefect/pull/7409
-- Add colored log levels — https://github.com/PrefectHQ/prefect/pull/6101
-- Update flow and task run page sidebar styling — https://github.com/PrefectHQ/prefect/pull/7426
-- Add redirect to logs tab when navigating to parent or child flow runs — https://github.com/PrefectHQ/prefect/pull/7439
-- Add `PREFECT_UI_URL` and `PREFECT_CLOUD_UI_URL` settings — https://github.com/PrefectHQ/prefect/pull/7411
-- Improve scheduler performance — https://github.com/PrefectHQ/prefect/pull/7450 https://github.com/PrefectHQ/prefect/pull/7433
-- Add link to parent flow from subflow details page — https://github.com/PrefectHQ/prefect/pull/7491
-- Improve visibility of deployment tags in the deployments page — https://github.com/PrefectHQ/prefect/pull/7491
-- Add deployment and flow metadata to infrastructure labels — https://github.com/PrefectHQ/prefect/pull/7479
-- Add obfuscation of secret settings — https://github.com/PrefectHQ/prefect/pull/7465
+
+- Add timeout support to tasks — <https://github.com/PrefectHQ/prefect/pull/7409>
+- Add colored log levels — <https://github.com/PrefectHQ/prefect/pull/6101>
+- Update flow and task run page sidebar styling — <https://github.com/PrefectHQ/prefect/pull/7426>
+- Add redirect to logs tab when navigating to parent or child flow runs — <https://github.com/PrefectHQ/prefect/pull/7439>
+- Add `PREFECT_UI_URL` and `PREFECT_CLOUD_UI_URL` settings — <https://github.com/PrefectHQ/prefect/pull/7411>
+- Improve scheduler performance — <https://github.com/PrefectHQ/prefect/pull/7450> <https://github.com/PrefectHQ/prefect/pull/7433>
+- Add link to parent flow from subflow details page — <https://github.com/PrefectHQ/prefect/pull/7491>
+- Improve visibility of deployment tags in the deployments page — <https://github.com/PrefectHQ/prefect/pull/7491>
+- Add deployment and flow metadata to infrastructure labels — <https://github.com/PrefectHQ/prefect/pull/7479>
+- Add obfuscation of secret settings — <https://github.com/PrefectHQ/prefect/pull/7465>

### Fixes
-- Fix missing import for `ObjectAlreadyExists` exception in deployments module — https://github.com/PrefectHQ/prefect/pull/7360
-- Fix export of `State` and `allow_failure` for type-checkers — https://github.com/PrefectHQ/prefect/pull/7447
-- Fix `--skip-upload` flag in `prefect deployment build` — https://github.com/PrefectHQ/prefect/pull/7437
-- Fix `visit_collection` handling of IO objects — https://github.com/PrefectHQ/prefect/pull/7482
-- Ensure that queries are sorted correctly when limits are used — https://github.com/PrefectHQ/prefect/pull/7457
+
+- Fix missing import for `ObjectAlreadyExists` exception in deployments module — <https://github.com/PrefectHQ/prefect/pull/7360>
+- Fix export of `State` and `allow_failure` for type-checkers — <https://github.com/PrefectHQ/prefect/pull/7447>
+- Fix `--skip-upload` flag in `prefect deployment build` — <https://github.com/PrefectHQ/prefect/pull/7437>
+- Fix `visit_collection` handling of IO objects — <https://github.com/PrefectHQ/prefect/pull/7482>
+- Ensure that queries are sorted correctly when limits are used — <https://github.com/PrefectHQ/prefect/pull/7457>

### Deprecations
-- `PREFECT_CLOUD_URL` has been deprecated in favor of `PREFECT_CLOUD_API_URL` — https://github.com/PrefectHQ/prefect/pull/7411
-- `prefect.orion.utilities.names` has been deprecated in favor of `prefect.utilities.names` —
https://github.com/PrefectHQ/prefect/pull/7465
+
+- `PREFECT_CLOUD_URL` has been deprecated in favor of `PREFECT_CLOUD_API_URL` — <https://github.com/PrefectHQ/prefect/pull/7411>
+- `prefect.orion.utilities.names` has been deprecated in favor of `prefect.utilities.names` — <https://github.com/PrefectHQ/prefect/pull/7465>

### Documentation
-- Add support for dark mode — https://github.com/PrefectHQ/prefect/pull/7432 and https://github.com/PrefectHQ/prefect/pull/7462
-- Add [audit log documentation](https://docs.prefect.io/ui/audit-log/) for Prefect Cloud — https://github.com/PrefectHQ/prefect/pull/7404
-- Add [troubleshooting topics](https://docs.prefect.io/ui/troubleshooting/) for Prefect Cloud — https://github.com/PrefectHQ/prefect/pull/7446
+
+- Add support for dark mode — <https://github.com/PrefectHQ/prefect/pull/7432> and <https://github.com/PrefectHQ/prefect/pull/7462>
+- Add [audit log documentation](https://docs.prefect.io/ui/audit-log/) for Prefect Cloud — <https://github.com/PrefectHQ/prefect/pull/7404>
+- Add [troubleshooting topics](https://docs.prefect.io/ui/troubleshooting/) for Prefect Cloud — <https://github.com/PrefectHQ/prefect/pull/7446>

### Collections
-- Adds auto-registration of blocks from AWS, Azure, GCP, and Databricks collections — https://github.com/PrefectHQ/prefect/pull/7415
-- Add new [`prefect-hightouch`](https://prefecthq.github.io/prefect-hightouch/) collection for [Hightouch](https://hightouch.com/) — https://github.com/PrefectHQ/prefect/pull/7443
+
+- Adds auto-registration of blocks from AWS, Azure, GCP, and Databricks collections — <https://github.com/PrefectHQ/prefect/pull/7415>
+- Add new [`prefect-hightouch`](https://prefecthq.github.io/prefect-hightouch/) collection for [Hightouch](https://hightouch.com/) — <https://github.com/PrefectHQ/prefect/pull/7443>

### Contributors
-- @tekumara
-- @bcbernardo made their first contribution in https://github.com/PrefectHQ/prefect/pull/7360
-- @br3ndonland made their first contribution in https://github.com/PrefectHQ/prefect/pull/7432
+- @tekumara
+- @bcbernardo made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7360>
+- @br3ndonland made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7432>

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.6...2.6.7
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.6...2.6.7>

## Release 2.6.6

### Enhancements
+
- Add work queue status and health display to UI — [#733](https://github.com/PrefectHQ/orion-design/pull/733), [#743](https://github.com/PrefectHQ/orion-design/pull/743), [#750](https://github.com/PrefectHQ/orion-design/pull/750)
-- Add `wait_for` to flows; subflows can wait for upstream tasks — https://github.com/PrefectHQ/prefect/pull/7343
-- Add informative error if flow run is deleted while running — https://github.com/PrefectHQ/prefect/pull/7390
-- Add name filtering support to the `work_queues/filter` API route — https://github.com/PrefectHQ/prefect/pull/7394
-- Improve the stability of the scheduler service — https://github.com/PrefectHQ/prefect/pull/7412
+- Add `wait_for` to flows; subflows can wait for upstream tasks — <https://github.com/PrefectHQ/prefect/pull/7343>
+- Add informative error if flow run is deleted while running — <https://github.com/PrefectHQ/prefect/pull/7390>
+- Add name filtering support to the `work_queues/filter` API route — <https://github.com/PrefectHQ/prefect/pull/7394>
+- Improve the stability of the scheduler service — <https://github.com/PrefectHQ/prefect/pull/7412>

### Fixes
-- Fix GitHub storage error for Windows — https://github.com/PrefectHQ/prefect/pull/7372
-- Fix links to flow runs in notifications — https://github.com/PrefectHQ/prefect/pull/7249
-- Fix link to UI deployment page in CLI — https://github.com/PrefectHQ/prefect/pull/7376
-- Fix UI URL routing to be consistent with CLI — https://github.com/PrefectHQ/prefect/pull/7391
-- Assert that command is a list when passed to `open_process` — https://github.com/PrefectHQ/prefect/pull/7389
-- Fix JSON error when serializing certain flow run parameters such as dataframes — https://github.com/PrefectHQ/prefect/pull/7385
+
+- Fix GitHub storage error for Windows — <https://github.com/PrefectHQ/prefect/pull/7372>
+- Fix links to flow runs in notifications — <https://github.com/PrefectHQ/prefect/pull/7249>
+- Fix link to UI
deployment page in CLI — <https://github.com/PrefectHQ/prefect/pull/7376>
+- Fix UI URL routing to be consistent with CLI — <https://github.com/PrefectHQ/prefect/pull/7391>
+- Assert that command is a list when passed to `open_process` — <https://github.com/PrefectHQ/prefect/pull/7389>
+- Fix JSON error when serializing certain flow run parameters such as dataframes — <https://github.com/PrefectHQ/prefect/pull/7385>

### Documentation
-- Add versioning documentation — https://github.com/PrefectHQ/prefect/pull/7353
+
+- Add versioning documentation — <https://github.com/PrefectHQ/prefect/pull/7353>

### Collections
+
- New [`prefect-alert`](https://github.com/khuyentran1401/prefect-alert) collection for sending alerts on flow run fail
- New [Fivetran](https://fivetran.github.io/prefect-fivetran/) collection
- New [GitLab](https://prefecthq.github.io/prefect-gitlab/) collection

## Contributors
+
- @marwan116

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.5...2.6.6
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.5...2.6.6>

## Release 2.6.5

### Enhancements
-- Add support for manual flow run retries — https://github.com/PrefectHQ/prefect/pull/7152
-- Improve server performance when retrying flow runs with many tasks — https://github.com/PrefectHQ/prefect/pull/7152
-- Add status checks to work queues — https://github.com/PrefectHQ/prefect/pull/7262
-- Add timezone parameter to `prefect deployment build` — https://github.com/PrefectHQ/prefect/pull/7282
-- UI: Add redirect to original block form after creating a nested block — https://github.com/PrefectHQ/prefect/pull/7284
-- Add support for multiple work queue prefixes — https://github.com/PrefectHQ/prefect/pull/7222
-- Include "-" before random suffix of Kubernetes job names — https://github.com/PrefectHQ/prefect/pull/7329
-- Allow a working directory to be specified for `Process` infrastructure — https://github.com/PrefectHQ/prefect/pull/7252
-- Add support for Python 3.11 — https://github.com/PrefectHQ/prefect/pull/7304
-- Add persistence of data when a state is returned from a task or flow — https://github.com/PrefectHQ/prefect/pull/7316
-- Add `ignore_file` to `Deployment.build_from_flow()` — https://github.com/PrefectHQ/prefect/pull/7012
+
+- Add support for manual flow run retries — <https://github.com/PrefectHQ/prefect/pull/7152>
+- Improve server performance when retrying flow runs with many tasks — <https://github.com/PrefectHQ/prefect/pull/7152>
+- Add status checks to work queues — <https://github.com/PrefectHQ/prefect/pull/7262>
+- Add timezone parameter to `prefect deployment build` — <https://github.com/PrefectHQ/prefect/pull/7282>
+- UI: Add redirect to original block form after creating a nested block — <https://github.com/PrefectHQ/prefect/pull/7284>
+- Add support for multiple work queue prefixes — <https://github.com/PrefectHQ/prefect/pull/7222>
+- Include "-" before random suffix of Kubernetes job names — <https://github.com/PrefectHQ/prefect/pull/7329>
+- Allow a working directory to be specified for `Process` infrastructure — <https://github.com/PrefectHQ/prefect/pull/7252>
+- Add support for Python 3.11 — <https://github.com/PrefectHQ/prefect/pull/7304>
+- Add persistence of data when a state is returned from a task or flow — <https://github.com/PrefectHQ/prefect/pull/7316>
+- Add `ignore_file` to `Deployment.build_from_flow()` — <https://github.com/PrefectHQ/prefect/pull/7012>

### Fixes
-- Allow `with_options` to reset retries and retry delays — https://github.com/PrefectHQ/prefect/pull/7276
-- Fix proxy-awareness in the `OrionClient` — https://github.com/PrefectHQ/prefect/pull/7328
-- Fix block auto-registration when changing databases — https://github.com/PrefectHQ/prefect/pull/7350
-- Include hidden files when uploading directories to `RemoteFileSystem` storage — https://github.com/PrefectHQ/prefect/pull/7336
-- UI: added support for unsetting color-mode preference, `null` is now equivalent to "default" — https://github.com/PrefectHQ/prefect/pull/7321
+
+- Allow `with_options` to reset retries and retry delays — <https://github.com/PrefectHQ/prefect/pull/7276>
+- Fix proxy-awareness in the `OrionClient` — <https://github.com/PrefectHQ/prefect/pull/7328>
+- Fix block auto-registration when changing databases — <https://github.com/PrefectHQ/prefect/pull/7350>
+- Include hidden files when uploading directories to `RemoteFileSystem` storage — <https://github.com/PrefectHQ/prefect/pull/7336>
+- UI: added support for unsetting color-mode preference, `null` is now equivalent to "default" — <https://github.com/PrefectHQ/prefect/pull/7321>

### Documentation
-- Add documentation for Prefect Cloud SSO — https://github.com/PrefectHQ/prefect/pull/7302
+
+- Add documentation for Prefect Cloud SSO — <https://github.com/PrefectHQ/prefect/pull/7302>

### Collections
+
- New [`prefect-docker`](https://prefecthq.github.io/prefect-docker/) collection for [Docker](https://www.docker.com/)
- New [`prefect-census`](https://prefecthq.github.io/prefect-census/) collection for [Census](https://docs.getcensus.com/)

## Contributors
-- @BallisticPain made their first contribution in https://github.com/PrefectHQ/prefect/pull/7252
+
+- @BallisticPain made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7252>
- @deepyaman
- @hateyouinfinity
- @jmg-duarte
- @taljaards

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.4...2.6.5
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.4...2.6.5>

## Release 2.6.4

### Enhancements
-- UI: Rename deployment "Overview" tab to "Description" — https://github.com/PrefectHQ/prefect/pull/7234
-- Add `Deployment.build_from_flow` toggle to disable loading of existing values from the API — https://github.com/PrefectHQ/prefect/pull/7218
-- Add `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting to globally toggle the result persistence default — https://github.com/PrefectHQ/prefect/pull/7228
-- Add support for using callable objects as tasks — https://github.com/PrefectHQ/prefect/pull/7217
-- Add authentication as service principal to the `Azure` storage block — https://github.com/PrefectHQ/prefect/pull/6844
-- Update default database timeout from 1 to 5 seconds — https://github.com/PrefectHQ/prefect/pull/7246
+
+- UI: Rename deployment "Overview" tab to "Description" — <https://github.com/PrefectHQ/prefect/pull/7234>
+- Add `Deployment.build_from_flow` toggle to disable loading of existing values from the API — <https://github.com/PrefectHQ/prefect/pull/7218>
+- Add `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting to globally toggle the result persistence default — <https://github.com/PrefectHQ/prefect/pull/7228>
+- Add support for using callable objects as tasks — <https://github.com/PrefectHQ/prefect/pull/7217>
+- Add authentication as service principal to the `Azure` storage block — <https://github.com/PrefectHQ/prefect/pull/6844>
+- Update default database timeout from 1 to 5 seconds — <https://github.com/PrefectHQ/prefect/pull/7246>

### Fixes
-- Allow image/namespace fields to be loaded from Kubernetes job manifest — https://github.com/PrefectHQ/prefect/pull/7244
-- UI: Update settings API call to respect `ORION_UI_SERVE_BASE` environment variable — https://github.com/PrefectHQ/prefect/pull/7068
-- Fix entrypoint path error when deployment is created on Windows then run on Unix — https://github.com/PrefectHQ/prefect/pull/7261
+
+- Allow image/namespace fields to be loaded from Kubernetes job manifest — <https://github.com/PrefectHQ/prefect/pull/7244>
+- UI: Update settings API call to respect `ORION_UI_SERVE_BASE` environment variable — <https://github.com/PrefectHQ/prefect/pull/7068>
+- Fix entrypoint path error when deployment is created on Windows then run on Unix — <https://github.com/PrefectHQ/prefect/pull/7261>

### Collections
+
- New [`prefect-kv`](https://github.com/madkinsz/prefect-kv) collection for persisting key-value data
-- `prefect-aws`: Update [`S3Bucket`](https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.s3.S3Bucket) storage block to enable use with deployments — https://github.com/PrefectHQ/prefect-aws/pull/82
-- `prefect-aws`: Add support for arbitrary user customizations to [`ECSTask`](https://prefecthq.github.io/prefect-aws/ecs/) block — https://github.com/PrefectHQ/prefect-aws/pull/120
+- `prefect-aws`: Update [`S3Bucket`](https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.s3.S3Bucket) storage block to enable use with deployments — <https://github.com/PrefectHQ/prefect-aws/pull/82>
+- `prefect-aws`: Add support for arbitrary user customizations to [`ECSTask`](https://prefecthq.github.io/prefect-aws/ecs/) block — <https://github.com/PrefectHQ/prefect-aws/pull/120>
- `prefect-aws`: Removed the experimental designation from the [`ECSTask`](https://prefecthq.github.io/prefect-aws/ecs/) block
-- `prefect-azure`: New
[`AzureContainerInstanceJob`](https://prefecthq.github.io/prefect-azure/container_instance/) infrastructure block to run flows or commands as containers on Azure — https://github.com/PrefectHQ/prefect-azure/pull/45
+- `prefect-azure`: New [`AzureContainerInstanceJob`](https://prefecthq.github.io/prefect-azure/container_instance/) infrastructure block to run flows or commands as containers on Azure — <https://github.com/PrefectHQ/prefect-azure/pull/45>

### Contributors
+
- @Trymzet
- @jmg-duarte
-- @mthanded made their first contribution in https://github.com/PrefectHQ/prefect/pull/7068
+- @mthanded made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7068>

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.3...2.6.4
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.3...2.6.4>

## Release 2.6.3

### Fixes
-- Fix handling of `cache_result_in_memory` in `Task.with_options` — https://github.com/PrefectHQ/prefect/pull/7227
-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.2...2.6.3
+- Fix handling of `cache_result_in_memory` in `Task.with_options` — <https://github.com/PrefectHQ/prefect/pull/7227>
+
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.2...2.6.3>

## Release 2.6.2

### Enhancements
-- Add `CompressedSerializer` for compression of other result serializers — https://github.com/PrefectHQ/prefect/pull/7164
-- Add option to drop task or flow return values from memory — https://github.com/PrefectHQ/prefect/pull/7174
-- Add support for creating and reading notification policies from the client — https://github.com/PrefectHQ/prefect/pull/7154
-- Add API support for sorting deployments — https://github.com/PrefectHQ/prefect/pull/7187
-- Improve searching and sorting of flows and deployments in the UI — https://github.com/PrefectHQ/prefect/pull/7160
-- Improve recurrence rule schedule parsing with support for compound rules — https://github.com/PrefectHQ/prefect/pull/7165
-- Add support for private GitHub repositories — https://github.com/PrefectHQ/prefect/pull/7107
+
+- Add `CompressedSerializer` for compression of other result serializers — <https://github.com/PrefectHQ/prefect/pull/7164>
+- Add option to drop task or flow return values from memory — <https://github.com/PrefectHQ/prefect/pull/7174>
+- Add support for creating and reading notification policies from the client — <https://github.com/PrefectHQ/prefect/pull/7154>
+- Add API support for sorting deployments — <https://github.com/PrefectHQ/prefect/pull/7187>
+- Improve searching and sorting of flows and deployments in the UI — <https://github.com/PrefectHQ/prefect/pull/7160>
+- Improve recurrence rule schedule parsing with support for compound rules — <https://github.com/PrefectHQ/prefect/pull/7165>
+- Add support for private GitHub repositories — <https://github.com/PrefectHQ/prefect/pull/7107>

### Fixes
-- Improve orchestration handling of `after_transition` when exception encountered — https://github.com/PrefectHQ/prefect/pull/7156
-- Prevent block name from being reused on the block creation form in the UI — https://github.com/PrefectHQ/prefect/pull/7096
-- Fix bug where `with_options` incorrectly updates result settings — https://github.com/PrefectHQ/prefect/pull/7186
-- Add backwards compatibility for return of server-states from flows and tasks — https://github.com/PrefectHQ/prefect/pull/7189
-- Fix naming of subflow runs tab on flow run page in the UI — https://github.com/PrefectHQ/prefect/pull/7192
-- Fix `prefect orion start` error on Windows when module path contains spaces — https://github.com/PrefectHQ/prefect/pull/7224
+
+- Improve orchestration handling of `after_transition` when exception encountered — <https://github.com/PrefectHQ/prefect/pull/7156>
+- Prevent block name from being reused on the block creation form in the UI — <https://github.com/PrefectHQ/prefect/pull/7096>
+- Fix bug where `with_options` incorrectly updates result settings — <https://github.com/PrefectHQ/prefect/pull/7186>
+- Add backwards compatibility for return of server-states from flows and tasks — <https://github.com/PrefectHQ/prefect/pull/7189>
+- Fix naming of subflow runs tab on flow run page in the UI — <https://github.com/PrefectHQ/prefect/pull/7192>
+- Fix `prefect orion start` error on Windows when module path contains spaces — <https://github.com/PrefectHQ/prefect/pull/7224>

### Collections
+
- New
[prefect-monte-carlo](https://prefecthq.github.io/prefect-monte-carlo/) collection for interaction with [Monte Carlo](https://www.montecarlodata.com/)

### Contributors
+
- @jmg-duarte

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.6.1...2.6.2
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.1...2.6.2>

## Release 2.6.1

### Fixes
-- Fix bug where return values of `{}` or `[]` could be coerced to `None` — https://github.com/PrefectHQ/prefect/pull/7181
+
+- Fix bug where return values of `{}` or `[]` could be coerced to `None` — <https://github.com/PrefectHQ/prefect/pull/7181>

## Contributors
-* @acookin made their first contribution in https://github.com/PrefectHQ/prefect/pull/7172
+- @acookin made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7172>
+
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.6.0...2.6.1>

## Release 2.6.0

@@ -1871,17 +6804,17 @@ In this release, we're excited to announce this behavior is fully configurable a

Here are some highlights:

- Persistence of results is off by default.
-  - We will turn on result persistence automatically if needed for a feature you're using, but you can always opt-out.
-  - You can easily opt-in for any flow or task.
+  - We will turn on result persistence automatically if needed for a feature you're using, but you can always opt-out.
+  - You can easily opt-in for any flow or task.
- You can choose the result serializer.
-  - By default, we continue to use a pickle serializer, now with the ability to choose a custom implementation.
-  - We now offer a JSON result serializer with support for all of the types supported by Pydantic.
-  - You can also write your own serializer for full control.
-  - Unless your results are being persisted, they will not be serialized.
+  - By default, we continue to use a pickle serializer, now with the ability to choose a custom implementation.
+  - We now offer a JSON result serializer with support for all of the types supported by Pydantic.
+  - You can also write your own serializer for full control.
+  - Unless your results are being persisted, they will not be serialized.
- You can change the result storage.
-  - By default, we will continue to use the local file system.
-  - You can specify any of our storage blocks, such as AWS S3.
-  - You can use any storage block you have defined.
+  - By default, we will continue to use the local file system.
+  - You can specify any of our storage blocks, such as AWS S3.
+  - You can use any storage block you have defined.

All of the options can be customized per flow or task.

@@ -1917,7 +6850,7 @@ def four()
```

See the [documentation](https://docs.prefect.io/concepts/results/) for more details and examples.
-See https://github.com/PrefectHQ/prefect/pull/6908 for implementation details.
+See <https://github.com/PrefectHQ/prefect/pull/6908> for implementation details.

### Waiting for tasks even if they fail

@@ -1940,80 +6873,90 @@ def important_cleanup():
    ...
```

-See https://github.com/PrefectHQ/prefect/pull/7120 for implementation details.
+See <https://github.com/PrefectHQ/prefect/pull/7120> for implementation details.

### Work queue match support for agents

Agents can now match multiple work queues by providing a `--match` string instead of specifying all of the work queues. The agent will poll every work queue with a name that starts with the given string. Your agent will detect new work queues that match the option without requiring a restart!
```
-$ prefect agent start --match "foo-"
+prefect agent start --match "foo-"
```

### Enhancements
-- Add `--param` / `--params` support `prefect deployment run` — https://github.com/PrefectHQ/prefect/pull/7018
-- Add 'Show Active Runs' button to work queue page — https://github.com/PrefectHQ/prefect/pull/7092
-- Update block protection to only prevent deletion — https://github.com/PrefectHQ/prefect/pull/7042
-- Improve stability by optimizing the HTTP client — https://github.com/PrefectHQ/prefect/pull/7090
-- Optimize flow run history queries — https://github.com/PrefectHQ/prefect/pull/7138
-- Optimize server handling by saving log batches in individual transactions — https://github.com/PrefectHQ/prefect/pull/7141
-- Optimize deletion of auto-scheduled runs — https://github.com/PrefectHQ/prefect/pull/7102
+
+- Add `--param` / `--params` support to `prefect deployment run` — <https://github.com/PrefectHQ/prefect/pull/7018>
+- Add 'Show Active Runs' button to work queue page — <https://github.com/PrefectHQ/prefect/pull/7092>
+- Update block protection to only prevent deletion — <https://github.com/PrefectHQ/prefect/pull/7042>
+- Improve stability by optimizing the HTTP client — <https://github.com/PrefectHQ/prefect/pull/7090>
+- Optimize flow run history queries — <https://github.com/PrefectHQ/prefect/pull/7138>
+- Optimize server handling by saving log batches in individual transactions — <https://github.com/PrefectHQ/prefect/pull/7141>
+- Optimize deletion of auto-scheduled runs — <https://github.com/PrefectHQ/prefect/pull/7102>

### Fixes
-- Fix `DockerContainer` log streaming crash due to "marked for removal" error — https://github.com/PrefectHQ/prefect/pull/6860
-- Improve RRule schedule string parsing — https://github.com/PrefectHQ/prefect/pull/7133
-- Improve handling of duplicate blocks, reducing errors in server logs — https://github.com/PrefectHQ/prefect/pull/7140
-- Fix flow run URLs in notifications and `prefect deployment run` output — https://github.com/PrefectHQ/prefect/pull/7153
+
+- Fix `DockerContainer` log streaming crash due to "marked for removal" error — <https://github.com/PrefectHQ/prefect/pull/6860>
+- Improve RRule schedule string parsing — <https://github.com/PrefectHQ/prefect/pull/7133>
+- Improve handling of duplicate blocks, reducing errors in server logs — <https://github.com/PrefectHQ/prefect/pull/7140>
+- Fix flow run URLs in notifications and `prefect deployment run` output — <https://github.com/PrefectHQ/prefect/pull/7153>

### Documentation
-- Add documentation for support of proxies — https://github.com/PrefectHQ/prefect/pull/7087
-- Fix rendering of Prefect settings in API reference — https://github.com/PrefectHQ/prefect/pull/7067
+
+- Add documentation for support of proxies — <https://github.com/PrefectHQ/prefect/pull/7087>
+- Fix rendering of Prefect settings in API reference — <https://github.com/PrefectHQ/prefect/pull/7067>

### Contributors
-* @jmg-duarte
-* @kevin868 made their first contribution in https://github.com/PrefectHQ/prefect/pull/7109
-* @space-age-pete made their first contribution in https://github.com/PrefectHQ/prefect/pull/7122

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.5.0...2.6.0
+- @jmg-duarte
+
+- @kevin868 made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7109>
+- @space-age-pete made their first contribution in <https://github.com/PrefectHQ/prefect/pull/7122>
+
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.5.0...2.6.0>

## Release 2.5.0

### Exciting New Features 🎉

- Add `prefect.deployments.run_deployment` to create a flow run for a deployment with support for:
-  - Configurable execution modes: returning immediately or waiting for completion of the run.
-  - Scheduling runs in the future or now.
-  - Custom flow run names.
-  - Automatic linking of created flow run to the flow run it is created from.
-  - Automatic tracking of upstream task results passed as parameters.
+  - Configurable execution modes: returning immediately or waiting for completion of the run.
+  - Scheduling runs in the future or now.
+  - Custom flow run names.
+  - Automatic linking of created flow run to the flow run it is created from.
+  - Automatic tracking of upstream task results passed as parameters.
- See https://github.com/PrefectHQ/prefect/pull/7047, https://github.com/PrefectHQ/prefect/pull/7081, and https://github.com/PrefectHQ/prefect/pull/7084
+  See <https://github.com/PrefectHQ/prefect/pull/7047>, <https://github.com/PrefectHQ/prefect/pull/7081>, and <https://github.com/PrefectHQ/prefect/pull/7084>

### Enhancements
-- Add ability to delete multiple objects on flow run, flow, deployment and work queue pages — https://github.com/PrefectHQ/prefect/pull/7086
-- Update `put_directory` to exclude directories from upload counts — https://github.com/PrefectHQ/prefect/pull/7054
-- Always suppress griffe logs — https://github.com/PrefectHQ/prefect/pull/7059
-- Add OOM warning to `Process` exit code log message — https://github.com/PrefectHQ/prefect/pull/7070
-- Add idempotency key support to `OrionClient.create_flow_run_from_deployment` — https://github.com/PrefectHQ/prefect/pull/7074
+
+- Add ability to delete multiple objects on flow run, flow, deployment and work queue pages — <https://github.com/PrefectHQ/prefect/pull/7086>
+- Update `put_directory` to exclude directories from upload counts — <https://github.com/PrefectHQ/prefect/pull/7054>
+- Always suppress griffe logs — <https://github.com/PrefectHQ/prefect/pull/7059>
+- Add OOM warning to `Process` exit code log message — <https://github.com/PrefectHQ/prefect/pull/7070>
+- Add idempotency key support to `OrionClient.create_flow_run_from_deployment` — <https://github.com/PrefectHQ/prefect/pull/7074>

### Fixes
-- Fix default start date filter for deployments page in UI — https://github.com/PrefectHQ/prefect/pull/7025
-- Fix `sync_compatible` handling of wrapped async functions and generators — https://github.com/PrefectHQ/prefect/pull/7009
-- Fix bug where server could error due to an unexpected null in task caching logic — https://github.com/PrefectHQ/prefect/pull/7031
-- Add exception handling to block auto-registration — https://github.com/PrefectHQ/prefect/pull/6997
-- Remove the "sync caller" check from `sync_compatible` — https://github.com/PrefectHQ/prefect/pull/7073
+
+- Fix default start date filter for deployments page in UI — <https://github.com/PrefectHQ/prefect/pull/7025>
+- Fix `sync_compatible` handling of wrapped async functions and generators — <https://github.com/PrefectHQ/prefect/pull/7009>
+- Fix bug where server could error due to an unexpected null in task caching logic — <https://github.com/PrefectHQ/prefect/pull/7031>
+- Add exception handling to block auto-registration — <https://github.com/PrefectHQ/prefect/pull/6997>
+- Remove the "sync caller" check from `sync_compatible` — <https://github.com/PrefectHQ/prefect/pull/7073>

### Documentation
-- Add `ECSTask` block tutorial to recipes — https://github.com/PrefectHQ/prefect/pull/7066
-- Update documentation for organizations for member management, roles, and permissions — https://github.com/PrefectHQ/prefect/pull/7058
+
+- Add `ECSTask` block tutorial to recipes — <https://github.com/PrefectHQ/prefect/pull/7066>
+- Update documentation for organizations for member management, roles, and permissions — <https://github.com/PrefectHQ/prefect/pull/7058>

## Collections
+
- New [prefect-soda-core](https://sodadata.github.io/prefect-soda-core/) collection for integration with [Soda](https://www.soda.io/).

### Contributors
+
- @taljaards

-**All changes**: https://github.com/PrefectHQ/prefect/compare/2.4.5...2.5.0
+**All changes**: <https://github.com/PrefectHQ/prefect/compare/2.4.5...2.5.0>

## Release 2.4.5

@@ -2026,160 +6969,191 @@ Block protection was added in 2.4.1 to prevent users from deleting block types t

**When running a server with this version, the client must be the same version.
This does not apply to clients connecting to Prefect Cloud.**

### Enhancements
-- Warn if user tries to login with API key from Cloud 1 — https://github.com/PrefectHQ/prefect/pull/6958
-- Improve concurrent task runner performance — https://github.com/PrefectHQ/prefect/pull/6948
-- Raise a `MissingContextError` when `get_run_logger` is called outside a run context — https://github.com/PrefectHQ/prefect/pull/6980
-- Adding caching to API configuration lookups to improve performance — https://github.com/PrefectHQ/prefect/pull/6959
-- Move `quote` to `prefect.utilities.annotations` — https://github.com/PrefectHQ/prefect/pull/6993
-- Add state filters and sort-by to the work-queue, flow and deployment pages — https://github.com/PrefectHQ/prefect/pull/6985
+
+- Warn if user tries to login with API key from Cloud 1 — <https://github.com/PrefectHQ/prefect/pull/6958>
+- Improve concurrent task runner performance — <https://github.com/PrefectHQ/prefect/pull/6948>
+- Raise a `MissingContextError` when `get_run_logger` is called outside a run context — <https://github.com/PrefectHQ/prefect/pull/6980>
+- Add caching to API configuration lookups to improve performance — <https://github.com/PrefectHQ/prefect/pull/6959>
+- Move `quote` to `prefect.utilities.annotations` — <https://github.com/PrefectHQ/prefect/pull/6993>
+- Add state filters and sort-by to the work-queue, flow and deployment pages — <https://github.com/PrefectHQ/prefect/pull/6985>

### Fixes
-- Fix login to private Docker registries — https://github.com/PrefectHQ/prefect/pull/6889
-- Update `Flow.with_options` to actually pass retry settings to new object — https://github.com/PrefectHQ/prefect/pull/6963
-- Fix compatibility for protected blocks when client/server versions are mismatched — https://github.com/PrefectHQ/prefect/pull/6986
-- Ensure `python-slugify` is always used even if [unicode-slugify](https://github.com/mozilla/unicode-slugify) is installed — https://github.com/PrefectHQ/prefect/pull/6955
+
+- Fix login to private Docker registries — <https://github.com/PrefectHQ/prefect/pull/6889>
+- Update `Flow.with_options` to actually pass retry settings to new object — <https://github.com/PrefectHQ/prefect/pull/6963>
+- Fix compatibility for protected blocks when client/server versions are mismatched — <https://github.com/PrefectHQ/prefect/pull/6986>
+- Ensure `python-slugify` is always used even if [unicode-slugify](https://github.com/mozilla/unicode-slugify) is installed — <https://github.com/PrefectHQ/prefect/pull/6955>

### Documentation
-- Update documentation for specifying schedules from the CLI — https://github.com/PrefectHQ/prefect/pull/6968
-- Add results concept to documentation — https://github.com/PrefectHQ/prefect/pull/6992
+
+- Update documentation for specifying schedules from the CLI — <https://github.com/PrefectHQ/prefect/pull/6968>
+- Add results concept to documentation — <https://github.com/PrefectHQ/prefect/pull/6992>

### Collections
-- New [`prefect-hex` collection](https://prefecthq.github.io/prefect-hex/) — https://github.com/PrefectHQ/prefect/pull/6974
-- New [`CloudRunJob` infrastructure block](https://prefecthq.github.io/prefect-gcp/cloud_run/) in `prefect-gcp` — https://github.com/PrefectHQ/prefect-gcp/pull/48
+
+- New [`prefect-hex` collection](https://prefecthq.github.io/prefect-hex/) — <https://github.com/PrefectHQ/prefect/pull/6974>
+- New [`CloudRunJob` infrastructure block](https://prefecthq.github.io/prefect-gcp/cloud_run/) in `prefect-gcp` — <https://github.com/PrefectHQ/prefect-gcp/pull/48>

### Contributors
-* @Hongbo-Miao made their first contribution in https://github.com/PrefectHQ/prefect/pull/6956
-* @hateyouinfinity made their first contribution in https://github.com/PrefectHQ/prefect/pull/6955
+
+- @Hongbo-Miao made their first contribution in <https://github.com/PrefectHQ/prefect/pull/6956>
+
+- @hateyouinfinity made their first contribution in <https://github.com/PrefectHQ/prefect/pull/6955>

## Release 2.4.2

### Fixes
-- Remove types in blocks docstring attributes to avoid annotation parsing warnings — https://github.com/PrefectHQ/prefect/pull/6937
-- Fixes `inject_client` in scenarios where the `client` kwarg is passed `None` — https://github.com/PrefectHQ/prefect/pull/6942
+
+- Remove types in blocks docstring attributes to avoid annotation parsing
### Documentation
-- Update documentation for specifying schedules from the CLI — https://github.com/PrefectHQ/prefect/pull/6968
-- Add results concept to documentation — https://github.com/PrefectHQ/prefect/pull/6992
+
+- Update documentation for specifying schedules from the CLI — https://github.com/PrefectHQ/prefect/pull/6968
+- Add results concept to documentation — https://github.com/PrefectHQ/prefect/pull/6992

### Collections
-- New [`prefect-hex` collection](https://prefecthq.github.io/prefect-hex/) — https://github.com/PrefectHQ/prefect/pull/6974
-- New [`CloudRunJob` infrastructure block](https://prefecthq.github.io/prefect-gcp/cloud_run/) in `prefect-gcp` — https://github.com/PrefectHQ/prefect-gcp/pull/48
+
+- New [`prefect-hex` collection](https://prefecthq.github.io/prefect-hex/) — https://github.com/PrefectHQ/prefect/pull/6974
+- New [`CloudRunJob` infrastructure block](https://prefecthq.github.io/prefect-gcp/cloud_run/) in `prefect-gcp` — https://github.com/PrefectHQ/prefect-gcp/pull/48

### Contributors
-* @Hongbo-Miao made their first contribution in https://github.com/PrefectHQ/prefect/pull/6956
-* @hateyouinfinity made their first contribution in https://github.com/PrefectHQ/prefect/pull/6955
+
+- @Hongbo-Miao made their first contribution in https://github.com/PrefectHQ/prefect/pull/6956
+- @hateyouinfinity made their first contribution in https://github.com/PrefectHQ/prefect/pull/6955

## Release 2.4.2

### Fixes
-- Remove types in blocks docstring attributes to avoid annotation parsing warnings — https://github.com/PrefectHQ/prefect/pull/6937
-- Fixes `inject_client` in scenarios where the `client` kwarg is passed `None` — https://github.com/PrefectHQ/prefect/pull/6942
+
+- Remove types in blocks docstring attributes to avoid annotation parsing warnings — https://github.com/PrefectHQ/prefect/pull/6937
+- Fix `inject_client` in scenarios where the `client` kwarg is passed `None` — https://github.com/PrefectHQ/prefect/pull/6942

### Contributors
-* @john-jam made their first contribution in https://github.com/PrefectHQ/prefect/pull/6937
+
+- @john-jam made their first contribution in https://github.com/PrefectHQ/prefect/pull/6937

## Release 2.4.1

### Enhancements
-- Add TTL to `KubernetesJob` for automated cleanup of finished jobs — https://github.com/PrefectHQ/prefect/pull/6785
-- Add `prefect kubernetes manifest agent` to generate an agent Kubernetes manifest — https://github.com/PrefectHQ/prefect/pull/6771
-- Add `prefect block type delete` to delete block types — https://github.com/PrefectHQ/prefect/pull/6849
-- Add dynamic titles to tabs in UI — https://github.com/PrefectHQ/prefect/pull/6914
-- Hide secret tails by default — https://github.com/PrefectHQ/prefect/pull/6846
-- Add runs tab to show flow runs on the flow, deployment, and work-queue pages in the UI — https://github.com/PrefectHQ/prefect/pull/6721
-- Add toggle to disable block registration on application start — https://github.com/PrefectHQ/prefect/pull/6858
-- Use injected client during block registration, save, and load — https://github.com/PrefectHQ/prefect/pull/6857
-- Refactor of `prefect.client` into `prefect.client.orion` and `prefect.client.cloud` — https://github.com/PrefectHQ/prefect/pull/6847
-- Improve breadcrumbs on radar page in UI — https://github.com/PrefectHQ/prefect/pull/6757
-- Reject redundant state transitions to prevent duplicate runs — https://github.com/PrefectHQ/prefect/pull/6852
-- Update block auto-registration to use a cache to improve performance — https://github.com/PrefectHQ/prefect/pull/6841
-- Add ability to define blocks from collections to be registered by default — https://github.com/PrefectHQ/prefect/pull/6890
-- Update file systems interfaces to be sync compatible — https://github.com/PrefectHQ/prefect/pull/6511
-- Add flow run URLs to notifications — https://github.com/PrefectHQ/prefect/pull/6798
-- Add client retries on 503 responses — https://github.com/PrefectHQ/prefect/pull/6927
-- Update injected client retrieval to use the flow and task run context client for reduced overhead — https://github.com/PrefectHQ/prefect/pull/6859
-- Add Microsoft Teams notification block — https://github.com/PrefectHQ/prefect/pull/6920
+
+- Add TTL to `KubernetesJob` for automated cleanup of finished jobs (see the sketch below) — https://github.com/PrefectHQ/prefect/pull/6785
+- Add `prefect kubernetes manifest agent` to generate an agent Kubernetes manifest — https://github.com/PrefectHQ/prefect/pull/6771
+- Add `prefect block type delete` to delete block types — https://github.com/PrefectHQ/prefect/pull/6849
+- Add dynamic titles to tabs in UI — https://github.com/PrefectHQ/prefect/pull/6914
+- Hide secret tails by default — https://github.com/PrefectHQ/prefect/pull/6846
+- Add runs tab to show flow runs on the flow, deployment, and work-queue pages in the UI — https://github.com/PrefectHQ/prefect/pull/6721
+- Add toggle to disable block registration on application start — https://github.com/PrefectHQ/prefect/pull/6858
+- Use injected client during block registration, save, and load — https://github.com/PrefectHQ/prefect/pull/6857
+- Refactor of `prefect.client` into `prefect.client.orion` and `prefect.client.cloud` — https://github.com/PrefectHQ/prefect/pull/6847
+- Improve breadcrumbs on radar page in UI — https://github.com/PrefectHQ/prefect/pull/6757
+- Reject redundant state transitions to prevent duplicate runs — https://github.com/PrefectHQ/prefect/pull/6852
+- Update block auto-registration to use a cache to improve performance — https://github.com/PrefectHQ/prefect/pull/6841
+- Add ability to define blocks from collections to be registered by default — https://github.com/PrefectHQ/prefect/pull/6890
+- Update file systems interfaces to be sync compatible — https://github.com/PrefectHQ/prefect/pull/6511
+- Add flow run URLs to notifications — https://github.com/PrefectHQ/prefect/pull/6798
+- Add client retries on 503 responses — https://github.com/PrefectHQ/prefect/pull/6927
+- Update injected client retrieval to use the flow and task run context client for reduced overhead — https://github.com/PrefectHQ/prefect/pull/6859
+- Add Microsoft Teams notification block — https://github.com/PrefectHQ/prefect/pull/6920
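A minimal sketch of the new `KubernetesJob` TTL; the field name `finished_job_ttl` matches later 2.x releases and should be treated as an assumption here:

```python
from prefect.infrastructure import KubernetesJob

# Assumed field name: finished jobs are cleaned up by Kubernetes
# after this many seconds.
job = KubernetesJob(finished_job_ttl=300)
job.save("k8s-with-ttl")
```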
### Fixes
-- Fix `LocalFileSystem.get_directory` when from and to paths match — https://github.com/PrefectHQ/prefect/pull/6824
-- Fix registration of block schema versions — https://github.com/PrefectHQ/prefect/pull/6803
-- Update agent to capture infrastructure errors and fail the flow run instead of crashing — https://github.com/PrefectHQ/prefect/pull/6903
-- Fix bug where `OrionClient.read_logs` filter was ignored — https://github.com/PrefectHQ/prefect/pull/6885
+
+- Fix `LocalFileSystem.get_directory` when from and to paths match — https://github.com/PrefectHQ/prefect/pull/6824
+- Fix registration of block schema versions — https://github.com/PrefectHQ/prefect/pull/6803
+- Update agent to capture infrastructure errors and fail the flow run instead of crashing — https://github.com/PrefectHQ/prefect/pull/6903
+- Fix bug where `OrionClient.read_logs` filter was ignored — https://github.com/PrefectHQ/prefect/pull/6885

### Documentation
-- Add GitHub and Docker deployment recipe — https://github.com/PrefectHQ/prefect/pull/6825
-- Add parameter configuration examples — https://github.com/PrefectHQ/prefect/pull/6886
+
+- Add GitHub and Docker deployment recipe — https://github.com/PrefectHQ/prefect/pull/6825
+- Add parameter configuration examples — https://github.com/PrefectHQ/prefect/pull/6886

### Collections
-- Add `prefect-firebolt` to collections catalog — https://github.com/PrefectHQ/prefect/pull/6917
+
+- Add `prefect-firebolt` to collections catalog — https://github.com/PrefectHQ/prefect/pull/6917

### Helm Charts

+
- Major overhaul in how helm charts in `prefect-helm` are structured and how we version and release them — [2022.09.21 release](https://github.com/PrefectHQ/prefect-helm/releases/tag/2022.09.21)

### Contributors

+
- @jmg-duarte
- @taljaards
- @yashlad681
-- @hallenmaia made their first contributions(!) in https://github.com/PrefectHQ/prefect/pull/6903, https://github.com/PrefectHQ/prefect/pull/6785, and https://github.com/PrefectHQ/prefect/pull/6771
-- @dobbersc made their first contribution in https://github.com/PrefectHQ/prefect/pull/6870
-- @jnovinger made their first contribution in https://github.com/PrefectHQ/prefect/pull/6916
-- @mathijscarlu made their first contribution in https://github.com/PrefectHQ/prefect/pull/6885
-
+- @hallenmaia made their first contributions(!) in https://github.com/PrefectHQ/prefect/pull/6903, https://github.com/PrefectHQ/prefect/pull/6785, and https://github.com/PrefectHQ/prefect/pull/6771
+- @dobbersc made their first contribution in https://github.com/PrefectHQ/prefect/pull/6870
+- @jnovinger made their first contribution in https://github.com/PrefectHQ/prefect/pull/6916
+- @mathijscarlu made their first contribution in https://github.com/PrefectHQ/prefect/pull/6885

## Release 2.4.0

### Exciting New Features 🎉

+
- Add `ECSTask` infrastructure block to run commands and flows on AWS ECS
- See [the documentation](https://prefecthq.github.io/prefect-aws/ecs/) in the [prefect-aws collection](https://prefecthq.github.io/prefect-aws/) and usage notes in the [infrastructure guide](https://docs.prefect.io/concepts/infrastructure/#ecstask)
+  See [the documentation](https://prefecthq.github.io/prefect-aws/ecs/) in the [prefect-aws collection](https://prefecthq.github.io/prefect-aws/) and usage notes in the [infrastructure guide](https://docs.prefect.io/concepts/infrastructure/#ecstask)
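A minimal sketch of configuring the new block, assuming the import path published by the prefect-aws collection; the image and sizing values are placeholders:

```python
from prefect_aws.ecs import ECSTask

# Placeholder values; see the prefect-aws documentation linked above.
ecs = ECSTask(
    image="prefecthq/prefect:2-python3.9",
    cpu=256,
    memory=512,
)
ecs.save("example-ecs-task")
```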
### Enhancements
-- Update the deployments CLI to better support CI/CD use cases — https://github.com/PrefectHQ/prefect/pull/6697
-- Improve database query performance by removing unnecessary SQL transactions — https://github.com/PrefectHQ/prefect/pull/6714
-- Update blocks to dispatch instance creation using slugs — https://github.com/PrefectHQ/prefect/pull/6622
-- Add flow run start times to flow run metadata in UI — https://github.com/PrefectHQ/prefect/pull/6743
-- Update default infrastructure command to be set at runtime — https://github.com/PrefectHQ/prefect/pull/6610
-- Allow environment variables to be "unset" in infrastructure blocks — https://github.com/PrefectHQ/prefect/pull/6650
-- Add favicon switching feature for flow and task run pages — https://github.com/PrefectHQ/prefect/pull/6794
-- Update `Deployment.infrastructure` to accept types outside of the core library i.e. custom infrastructure or from collections — https://github.com/PrefectHQ/prefect/pull/6674
-- Update `deployment build --rrule` input to allow start date and timezones — https://github.com/PrefectHQ/prefect/pull/6761
+
+- Update the deployments CLI to better support CI/CD use cases — https://github.com/PrefectHQ/prefect/pull/6697
+- Improve database query performance by removing unnecessary SQL transactions — https://github.com/PrefectHQ/prefect/pull/6714
+- Update blocks to dispatch instance creation using slugs — https://github.com/PrefectHQ/prefect/pull/6622
+- Add flow run start times to flow run metadata in UI — https://github.com/PrefectHQ/prefect/pull/6743
+- Update default infrastructure command to be set at runtime — https://github.com/PrefectHQ/prefect/pull/6610
+- Allow environment variables to be "unset" in infrastructure blocks — https://github.com/PrefectHQ/prefect/pull/6650
+- Add favicon switching feature for flow and task run pages — https://github.com/PrefectHQ/prefect/pull/6794
+- Update `Deployment.infrastructure` to accept types outside of the core library, i.e. custom infrastructure or types from collections — https://github.com/PrefectHQ/prefect/pull/6674
+- Update `deployment build --rrule` input to allow start dates and timezones — https://github.com/PrefectHQ/prefect/pull/6761

### Fixes
-- Update crash detection to ignore abort signals — https://github.com/PrefectHQ/prefect/pull/6730
-- Protect against race condition with deployment schedules — https://github.com/PrefectHQ/prefect/pull/6673
-- Fix saving of block fields with aliases — https://github.com/PrefectHQ/prefect/pull/6758
-- Preserve task dependencies to futures passed as parameters in `.map` — https://github.com/PrefectHQ/prefect/pull/6701
-- Update task run orchestration to include latest metadata in context — https://github.com/PrefectHQ/prefect/pull/6791
+
+- Update crash detection to ignore abort signals — https://github.com/PrefectHQ/prefect/pull/6730
+- Protect against race condition with deployment schedules — https://github.com/PrefectHQ/prefect/pull/6673
+- Fix saving of block fields with aliases — https://github.com/PrefectHQ/prefect/pull/6758
+- Preserve task dependencies to futures passed as parameters in `.map` — https://github.com/PrefectHQ/prefect/pull/6701
+- Update task run orchestration to include latest metadata in context — https://github.com/PrefectHQ/prefect/pull/6791

### Documentation
-- Task runner documentation fixes and clarifications — https://github.com/PrefectHQ/prefect/pull/6733
-- Add notes for Windows and Linux installation — https://github.com/PrefectHQ/prefect/pull/6750
-- Add a catalog of implementation recipes — https://github.com/PrefectHQ/prefect/pull/6408
-- Improve storage and file systems documentation — https://github.com/PrefectHQ/prefect/pull/6756
-- Add CSS for badges — https://github.com/PrefectHQ/prefect/pull/6655
+
+- Task runner documentation fixes and clarifications — https://github.com/PrefectHQ/prefect/pull/6733
+- Add notes for Windows and Linux installation — https://github.com/PrefectHQ/prefect/pull/6750
+- Add a catalog of implementation recipes — https://github.com/PrefectHQ/prefect/pull/6408
+- Improve storage and file systems documentation — https://github.com/PrefectHQ/prefect/pull/6756
+- Add CSS for badges — https://github.com/PrefectHQ/prefect/pull/6655

### Contributors
-* @robalar made their first contribution in https://github.com/PrefectHQ/prefect/pull/6701
-* @shraddhafalane made their first contribution in https://github.com/PrefectHQ/prefect/pull/6784
+
+- @robalar made their first contribution in https://github.com/PrefectHQ/prefect/pull/6701
+- @shraddhafalane made their first contribution in https://github.com/PrefectHQ/prefect/pull/6784

## 2.3.2

### Enhancements
-* UI displays an error message when backend is unreachable - https://github.com/PrefectHQ/prefect/pull/6670
+
+- UI displays an error message when the backend is unreachable — https://github.com/PrefectHQ/prefect/pull/6670

### Fixes
-* Fix issue where parameters weren't updated when a deployment was re-applied by @lennertvandevelde in https://github.com/PrefectHQ/prefect/pull/6668
-* Fix issues with stopping Orion on Windows machines - https://github.com/PrefectHQ/prefect/pull/6672
-* Fix issue with GitHub storage running in non-empty directories - https://github.com/PrefectHQ/prefect/pull/6693
-* Fix issue where some user-supplied values were ignored when creating new deployments - https://github.com/PrefectHQ/prefect/pull/6695
+
+- Fix issue where parameters weren't updated when a deployment was re-applied, by @lennertvandevelde in https://github.com/PrefectHQ/prefect/pull/6668
+- Fix issues with stopping Orion on Windows machines — https://github.com/PrefectHQ/prefect/pull/6672
+- Fix issue with GitHub storage running in non-empty directories — https://github.com/PrefectHQ/prefect/pull/6693
+- Fix issue where some user-supplied values were ignored when creating new deployments — https://github.com/PrefectHQ/prefect/pull/6695

### Collections
-* Added [prefect-fugue](https://fugue-project.github.io/prefect-fugue/)
+
+- Added [prefect-fugue](https://fugue-project.github.io/prefect-fugue/)

### Contributors
-* @lennertvandevelde made their first contribution! — [https://github.com/PrefectHQ/prefect/pull/6668](https://github.com/PrefectHQ/prefect/pull/6668)
+
+- @lennertvandevelde made their first contribution! — [https://github.com/PrefectHQ/prefect/pull/6668](https://github.com/PrefectHQ/prefect/pull/6668)
## 2.3.1

### Enhancements
-* Add sync compatibility to `run` for all infrastructure types — https://github.com/PrefectHQ/prefect/pull/6654
-* Update Docker container name collision log to `INFO` level for clarity — https://github.com/PrefectHQ/prefect/pull/6657
-* Refactor block documents queries for speed ⚡️ — https://github.com/PrefectHQ/prefect/pull/6645
-* Update block CLI to match standard styling — https://github.com/PrefectHQ/prefect/pull/6679
+
+- Add sync compatibility to `run` for all infrastructure types — https://github.com/PrefectHQ/prefect/pull/6654
+- Update Docker container name collision log to `INFO` level for clarity — https://github.com/PrefectHQ/prefect/pull/6657
+- Refactor block documents queries for speed ⚡️ — https://github.com/PrefectHQ/prefect/pull/6645
+- Update block CLI to match standard styling — https://github.com/PrefectHQ/prefect/pull/6679

### Fixes
-* Add `git` to the Prefect image — https://github.com/PrefectHQ/prefect/pull/6653
-* Update Docker container runs to be robust to container removal — https://github.com/PrefectHQ/prefect/pull/6656
-* Fix parsing of `PREFECT_TEST_MODE` in `PrefectBaseModel` — https://github.com/PrefectHQ/prefect/pull/6647
-* Fix handling of `.prefectignore` paths on Windows — https://github.com/PrefectHQ/prefect/pull/6680
+
+- Add `git` to the Prefect image — https://github.com/PrefectHQ/prefect/pull/6653
+- Update Docker container runs to be robust to container removal — https://github.com/PrefectHQ/prefect/pull/6656
+- Fix parsing of `PREFECT_TEST_MODE` in `PrefectBaseModel` — https://github.com/PrefectHQ/prefect/pull/6647
+- Fix handling of `.prefectignore` paths on Windows — https://github.com/PrefectHQ/prefect/pull/6680

### Collections
-* [prefect-juptyer](https://prefecthq.github.io/prefect-jupyter/)
+
+- [prefect-jupyter](https://prefecthq.github.io/prefect-jupyter/)

### Contributors
-* @mars-f made their first contribution — https://github.com/PrefectHQ/prefect/pull/6639
-* @pdashk made their first contribution — https://github.com/PrefectHQ/prefect/pull/6640
+
+- @mars-f made their first contribution — https://github.com/PrefectHQ/prefect/pull/6639
+- @pdashk made their first contribution — https://github.com/PrefectHQ/prefect/pull/6640

## 2.3.0

@@ -2214,13 +7188,14 @@ Block protection was added in 2.4.1 to prevent users from deleting block types t

- Add the ability to specify relative sub-paths when working with remote storage for deployments — [#6518](https://github.com/PrefectHQ/prefect/pull/6518)
- Prevent non-UUID slugs from raising errors on `/block_document` endpoints — [#6541](https://github.com/PrefectHQ/prefect/pull/6541)
- Improve Docker image tag parsing to support the full Moby specification — [#6564](https://github.com/PrefectHQ/prefect/pull/6564)
+
### Fixes

- Set uvicorn `--app-dir` when starting Orion to avoid module collisions — [#6547](https://github.com/PrefectHQ/prefect/pull/6547)
- Resolve issue with Python-based deployments having incorrect entrypoint paths — [#6554](https://github.com/PrefectHQ/prefect/pull/6554)
- Fix Docker image tag parsing when ports are included — [#6567](https://github.com/PrefectHQ/prefect/pull/6567)
- Update Kubernetes Job to use `args` instead of `command` to respect image entrypoints — [#6581](https://github.com/PrefectHQ/prefect/pull/6581)
-  - Warning: If you are using a custom image with an entrypoint that does not allow passthrough of commands, flow runs will fail.
+  - Warning: If you are using a custom image with an entrypoint that does not allow passthrough of commands, flow runs will fail.
- Fix edge case in `sync_compatible` detection when using AnyIO task groups — [#6602](https://github.com/PrefectHQ/prefect/pull/6602)
- Add check for infrastructure and storage block capabilities during deployment build — [#6535](https://github.com/PrefectHQ/prefect/pull/6535)
- Fix issue where deprecated work queue pages showed multiple deprecation notices — [#6531](https://github.com/PrefectHQ/prefect/pull/6531)

@@ -2237,7 +7212,6 @@ Block protection was added in 2.4.1 to prevent users from deleting block types t

- Add details about flow run retention policies — [#6577](https://github.com/PrefectHQ/prefect/pull/6577)
- Fix flow parameter name docstring in deployments — [#6599](https://github.com/PrefectHQ/prefect/pull/6599)
-
### Contributors

Thanks to our external contributors!

@@ -2249,74 +7223,85 @@ Thanks to our external contributors!

## 2.2.0

### Exciting New Features 🎉
-* Added automatic detection of static arguments to `Task.map` in https://github.com/PrefectHQ/prefect/pull/6513
+
+- Added automatic detection of static arguments to `Task.map` in https://github.com/PrefectHQ/prefect/pull/6513

### Fixes
-* Updated deployment flow run retry settings with runtime values in https://github.com/PrefectHQ/prefect/pull/6489
-* Updated media queries for flow-run-filter in https://github.com/PrefectHQ/prefect/pull/6484
-* Added `empirical_policy` to flow run update route in https://github.com/PrefectHQ/prefect/pull/6486
-* Updated flow run policy retry settings to be nullable in https://github.com/PrefectHQ/prefect/pull/6488
-* Disallowed extra attribute initialization on `Deployment` objects in https://github.com/PrefectHQ/prefect/pull/6505
-* Updated `deployment build` to raise an informative error if two infrastructure configs are provided in https://github.com/PrefectHQ/prefect/pull/6504
-* Fixed calling async subflows from sync parents in https://github.com/PrefectHQ/prefect/pull/6514
+
+- Updated deployment flow run retry settings with runtime values in https://github.com/PrefectHQ/prefect/pull/6489
+- Updated media queries for flow-run-filter in https://github.com/PrefectHQ/prefect/pull/6484
+- Added `empirical_policy` to flow run update route in https://github.com/PrefectHQ/prefect/pull/6486
+- Updated flow run policy retry settings to be nullable in https://github.com/PrefectHQ/prefect/pull/6488
+- Disallowed extra attribute initialization on `Deployment` objects in https://github.com/PrefectHQ/prefect/pull/6505
+- Updated `deployment build` to raise an informative error if two infrastructure configs are provided in https://github.com/PrefectHQ/prefect/pull/6504
+- Fixed calling async subflows from sync parents in https://github.com/PrefectHQ/prefect/pull/6514

## 2.1.1

### Fixes
-* Fixed log on abort when the flow run context is not available in https://github.com/PrefectHQ/prefect/pull/6402
-* Fixed error message in `submit_run` in https://github.com/PrefectHQ/prefect/pull/6453
-* Fixed error if default parameters are missing on a deployment flow run in https://github.com/PrefectHQ/prefect/pull/6465
-* Added error message if `get_run_logger` receives context of unknown type in https://github.com/PrefectHQ/prefect/pull/6401
+
+- Fixed log on abort when the flow run context is not available in https://github.com/PrefectHQ/prefect/pull/6402
+- Fixed error message in `submit_run` in https://github.com/PrefectHQ/prefect/pull/6453
+- Fixed error if default parameters are missing on a deployment flow run in https://github.com/PrefectHQ/prefect/pull/6465
+- Added error message if `get_run_logger` receives context of unknown type in https://github.com/PrefectHQ/prefect/pull/6401

## 2.1.0

### Build Deployments in Python

+
The new, YAML-based deployment definition provides a simple, extensible foundation for our new deployment creation experience. Now, by popular demand, we're extending that experience to enable you to define deployments and build them from within Python. You can do so by defining a `Deployment` Python object, specifying the deployment options as properties of the object, then building and applying the object using methods of `Deployment`. See the [documentation](https://docs.prefect.io/concepts/deployments/) to learn more.
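As a rough sketch of the shape of this API (the flow, the names, and the `build_from_flow`/`apply` method names follow the 2.x `prefect.deployments` interface and should be treated as assumptions, not verbatim release code):

```python
from prefect import flow
from prefect.deployments import Deployment

@flow
def my_flow():
    ...

# Build a deployment object in Python instead of via the CLI, then apply it.
deployment = Deployment.build_from_flow(
    flow=my_flow,
    name="example",
    work_queue_name="default",
)
deployment.apply()
```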
### Simplified Agents & Work Queues

+
Agents and work queues give you control over where and how flow runs are executed. Now, creating an agent (and corresponding work queue) is even easier. Work queues now operate strictly by name, not by matching tags. Deployments, and the flow runs they generate, are explicitly linked to a single work queue, and the work queue is automatically created whenever a deployment references it. This means you no longer need to manually create a new work queue each time you want to route a deployment's flow runs separately. Agents can now pull from multiple work queues, and also automatically generate work queues that don't already exist. The result of these improvements is that most users will not have to interact directly with work queues at all, but advanced users can take advantage of them for increased control over how work is distributed to agents. These changes are fully backwards compatible. See the [documentation](https://docs.prefect.io/concepts/work-queues/) to learn more.

### Improvements and bug fixes
-* Added three new exceptions to improve errors when parameters are incorrectly supplied to flow runs in https://github.com/PrefectHQ/prefect/pull/6091
-* Fixed a task dependency issue where unpacked values were not being correctly traced in https://github.com/PrefectHQ/prefect/pull/6348
-* Added the ability to embed `BaseModel` subclasses as fields within blocks, resolving an issue with the ImagePullPolicy field on the KubernetesJob block in https://github.com/PrefectHQ/prefect/pull/6389
-* Added comments support for deployment.yaml to enable inline help in https://github.com/PrefectHQ/prefect/pull/6339
-* Added support for specifying three schedule types - cron, interval and rrule - to the `deployment build` CLI in https://github.com/PrefectHQ/prefect/pull/6387
-* Added error handling for exceptions raised during the pre-transition hook fired by an OrchestrationRule during state transitions in https://github.com/PrefectHQ/prefect/pull/6315
-* Updated `visit_collection` to be a synchronous function in https://github.com/PrefectHQ/prefect/pull/6371
-* Revised loop service method names for clarity in https://github.com/PrefectHQ/prefect/pull/6131
-* Modified deployments to load flows in a worker thread in https://github.com/PrefectHQ/prefect/pull/6340
-* Resolved issues with capture of user-raised timeouts in https://github.com/PrefectHQ/prefect/pull/6357
-* Added base class and async compatibility to DockerRegistry in https://github.com/PrefectHQ/prefect/pull/6328
-* Added `max_depth` to `visit_collection`, allowing recursion to be limited in https://github.com/PrefectHQ/prefect/pull/6367
-* Added CLI commands for inspecting and deleting Blocks and Block Types in https://github.com/PrefectHQ/prefect/pull/6422
-* Added a Server Message Block (SMB) file system block in https://github.com/PrefectHQ/prefect/pull/6344 - Special thanks to @darrida for this contribution!
-* Removed explicit type validation from some API routes in https://github.com/PrefectHQ/prefect/pull/6448
-* Improved robustness of streaming output from subprocesses in https://github.com/PrefectHQ/prefect/pull/6445
-* Added a default work queue ("default") when creating new deployments from the Python client or CLI in https://github.com/PrefectHQ/prefect/pull/6458
+
+- Added three new exceptions to improve errors when parameters are incorrectly supplied to flow runs in https://github.com/PrefectHQ/prefect/pull/6091
+- Fixed a task dependency issue where unpacked values were not being correctly traced in https://github.com/PrefectHQ/prefect/pull/6348
+- Added the ability to embed `BaseModel` subclasses as fields within blocks, resolving an issue with the ImagePullPolicy field on the KubernetesJob block in https://github.com/PrefectHQ/prefect/pull/6389
+- Added comments support for deployment.yaml to enable inline help in https://github.com/PrefectHQ/prefect/pull/6339
+- Added support for specifying three schedule types — cron, interval and rrule — to the `deployment build` CLI in https://github.com/PrefectHQ/prefect/pull/6387
+- Added error handling for exceptions raised during the pre-transition hook fired by an OrchestrationRule during state transitions in https://github.com/PrefectHQ/prefect/pull/6315
+- Updated `visit_collection` to be a synchronous function in https://github.com/PrefectHQ/prefect/pull/6371
+- Revised loop service method names for clarity in https://github.com/PrefectHQ/prefect/pull/6131
+- Modified deployments to load flows in a worker thread in https://github.com/PrefectHQ/prefect/pull/6340
+- Resolved issues with capture of user-raised timeouts in https://github.com/PrefectHQ/prefect/pull/6357
+- Added base class and async compatibility to DockerRegistry in https://github.com/PrefectHQ/prefect/pull/6328
+- Added `max_depth` to `visit_collection`, allowing recursion to be limited in https://github.com/PrefectHQ/prefect/pull/6367
+- Added CLI commands for inspecting and deleting Blocks and Block Types in https://github.com/PrefectHQ/prefect/pull/6422
+- Added a Server Message Block (SMB) file system block in https://github.com/PrefectHQ/prefect/pull/6344 — Special thanks to @darrida for this contribution!
+- Removed explicit type validation from some API routes in https://github.com/PrefectHQ/prefect/pull/6448
+- Improved robustness of streaming output from subprocesses in https://github.com/PrefectHQ/prefect/pull/6445
+- Added a default work queue ("default") when creating new deployments from the Python client or CLI in https://github.com/PrefectHQ/prefect/pull/6458

### New Collections

+
- [prefect-monday](https://prefecthq.github.io/prefect-monday/)
- [prefect-databricks](https://prefecthq.github.io/prefect-databricks/)
- [prefect-fugue](https://github.com/fugue-project/prefect-fugue/)

-**Full Changelog**: https://github.com/PrefectHQ/prefect/compare/2.0.4...2.1.0
+**Full Changelog**: https://github.com/PrefectHQ/prefect/compare/2.0.4...2.1.0

## 2.0.4

### Simplified deployments

+
The deployment experience has been refined to remove extraneous artifacts and make configuration even easier. In particular:

-- `prefect deployment build` no longer generates a  `manifest.json` file. Instead, all of the relevant information is written to the `deployment.yaml` file.
+- `prefect deployment build` no longer generates a `manifest.json` file. Instead, all of the relevant information is written to the `deployment.yaml` file.
- Values in the `deployment.yaml` file are more atomic and explicit
-- Local file system blocks are no longer saved automatically
-- Infrastructure block values can now be overwritten with the new `infra_overrides` field
+- Local file system blocks are no longer saved automatically
+- Infrastructure block values can now be overwritten with the new `infra_overrides` field

### Start custom flow runs from the UI

+
Now, from the deployment page, in addition to triggering an immediate flow run with default parameter arguments, you can also create a custom run. A custom run enables you to configure the run's parameter arguments, start time, name, and more, all while otherwise using the same deployment configuration. The deployment itself will be unchanged and continue to generate runs on its regular schedule.
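For API-first users, a comparable custom run can be created against a deployment with the Python client. This is a hedged sketch using `get_client` and `create_flow_run_from_deployment`; the deployment ID and parameter values are placeholders:

```python
from prefect.client import get_client

async def custom_run(deployment_id: str):
    async with get_client() as client:
        # Override the deployment's default parameter arguments for this run only.
        return await client.create_flow_run_from_deployment(
            deployment_id,
            parameters={"example_param": 42},
        )
```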
### Improvements and bug fixes

+
- Made timeout error messages on state changes more intuitive
- Added debug level logs for task run rehydration
- Added basic CLI functionality to inspect Blocks; more to come

@@ -2329,9 +7314,10 @@ This release contains a number of bug fixes and documentation improvements.

### Introducing [`prefect-dbt`](https://prefecthq.github.io/prefect-dbt/)

-We've released `prefect-dbt` - a collection of Prefect integrations for working with dbt in your Prefect flows. This collection has been built as part of a partnership with dbt Labs to ensure that it follows best practices for working with dbt.
+We've released `prefect-dbt` — a collection of Prefect integrations for working with dbt in your Prefect flows. This collection has been built as part of a partnership with dbt Labs to ensure that it follows best practices for working with dbt.

### Improvements and bug fixes

+
- Azure storage blocks can use `.prefectignore`
- Resolved bugs and improved interface in the Orion client.
- Resolved a bug in Azure storage blocks that would cause uploads to get stuck.
- Fixes `--manifest-only` flag of `prefect deployment build` command to ensure that using this flag, the manifest gets generated, but the upload to a storage location is skipped.
- Added support for multiple YAML deployment paths to the `prefect deployment apply` command.
-
## 2.0.2

This release implements a number of improvements and bug fixes in response to continued engagement by members of our community. Thanks, as always, to all who submitted ideas on how to make Prefect 2 even better.

### Introducing .prefectignore files
-  .prefectignore files allow users to omit certain files or directories from their deployments. Similar to other .ignore files, the syntax supports pattern matching, so an entry of `*.pyc` will ensure *all* .pyc files are ignored by the deployment call when uploading to remote storage. Prefect provides a default .prefectignore file, but users can customize it to their needs.
+
+.prefectignore files allow users to omit certain files or directories from their deployments. Similar to other .ignore files, the syntax supports pattern matching, so an entry of `*.pyc` will ensure _all_ .pyc files are ignored by the deployment call when uploading to remote storage. Prefect provides a default .prefectignore file, but users can customize it to their needs.

### Improvements and bug fixes

+
- Users can now leverage Azure storage blocks.
- Users can now submit bug reports and feature enhancements using our issue templates.
- Block deletion is now more performant.

@@ -2364,6 +7351,7 @@ This release implements a number of improvements and bug fixes in response to co

The response to Prefect 2 has been overwhelming in the best way possible. Thank you to the many community members who tried it out and gave us feedback! Thanks in particular to the students at this week's Prefect Associate Certification Course (PACC) in San Jose for their thoughtful recommendations. This release is a compilation of enhancements and fixes that make for a more resilient, performant, and refined Prefect experience.
### Improvements and bug fixes

+
- Schedules set via the API or UI are now preserved when building deployments from the CLI
- JSON types are now coerced to none, following JavaScript convention and supporting standards compatibility
- The `prefect deployment execute` command has been removed to avoid confusion between running a flow locally from a Python script and running it by an agent using `prefect deployment run`

@@ -2381,23 +7369,28 @@ We're thrilled to announce that, with this release, Prefect 2.0 has exited its p

Prefect 2.0 documentation is now hosted at [docs.prefect.io](https://docs.prefect.io). Prefect 1.0 documentation is now hosted at [docs-v1.prefect.io](https://docs-v1.prefect.io).

### Upgrading from Prefect 1.0

+
Flows written with Prefect 1.0 will require modifications to run with Prefect 2.0. If you're using Prefect 1.0, please see our [guidance on Discourse for explicitly pinning your Prefect version in your package manager and Docker](https://discourse.prefect.io/t/the-general-availability-release-of-prefect-2-0-going-live-on-wednesday-27th-of-july-may-break-your-flows-unless-you-take-action-as-soon-as-possible/1227), so that you can make the transition to Prefect 2.0 when the time is right for you. See our [migration page](https://upgrade.prefect.io/) to learn more about upgrading.

### Upgrading from earlier versions of Prefect 2.0

+
We have shipped a lot of breaking changes to Prefect 2.0 over the past week. Most importantly, **recent changes to deployments required that schedules for all previously created deployments be turned off**. You can learn more about the changes via the [deployments concept documentation](https://docs.prefect.io/concepts/deployments/), the [tutorial](https://docs.prefect.io/tutorials/deployments/), or the [discourse guide](https://discourse.prefect.io/t/deployments-are-now-simpler-and-declarative/1255).

## 2.0b16

### Simplified, declarative deployments

+
Prefect 2.0's deployments are a powerful way to encapsulate a flow, its required infrastructure, its schedule, its parameters, and more. Now, you can create deployments simply, with just two commands:

+
1. `prefect deployment build ./path/to/flow/file.py:name_of_flow_obj --name "Deployment Name"` produces two files:
   - A manifest file, containing workflow-specific information such as the code location, the name of the entrypoint flow, and flow parameters
   - A `deployment.yaml` file — a complete specification of the metadata and configuration for the deployment such as the name, tags, and description
-3. `prefect deployment apply ./deployment.yaml` creates or updates a deployment with the Orion server
+2. `prefect deployment apply ./deployment.yaml` creates or updates a deployment with the Orion server

Once the deployment is created with the Orion server, it can now be edited via the UI! See the [Deployments documentation to learn more](https://orion-docs.prefect.io/concepts/deployments/).
### Improvements and bug fixes

+
- The [Dask and Ray tutorials](https://orion-docs.prefect.io/tutorials/dask-ray-task-runners/) have been updated to reflect recent changes
- The [Blocks concept doc](https://orion-docs.prefect.io/concepts/blocks/) has been updated to reflect recent enhancements and includes additional examples
- The [Storage concept doc](https://orion-docs.prefect.io/concepts/storage/) has been updated to reflect recent enhancements

@@ -2408,6 +7401,7 @@ Once the deployment is created with the Orion server, it can now be edited via t

## 2.0b15

### Uniquely refer to blocks with slugs

+
Blocks are a convenient way to securely store and retrieve configuration. Now, retrieving configuration stored with blocks is even easier with slugs, both human and machine readable unique identifiers. By default, block type slugs are a lowercase, dash delimited version of the block type name, but can be customized via the `_block_type_slug` field on a custom Block subclass. Block document slugs are a concatenation of [block-type-slug]/[block-document-name] and can be used as an argument to the `Block.load` method. Slugs and block document names may only include alphanumeric characters and dashes.

**Warning**: This breaking change makes this release incompatible with previous versions of the Orion server and Prefect Cloud 2.0

@@ -2416,15 +7410,18 @@

## 2.0b14

-### Retreive the state of your tasks or flows with the `return_state` kwarg
+### Retrieve the state of your tasks or flows with the `return_state` kwarg

+
Beginning with 2.0b9, Prefect 2.0 began returning function results, instead of Prefect futures and states, by default. States are still an important concept in Prefect 2. They can be used to dictate and understand the behavior of your flows. Now, you can access the state for _any_ task or flow with the new `return_state` kwarg. Just set `return_state=True` in your flow or task call and you can access its state with the `.result()` method, even if it's been submitted to a task runner.

### `prefect cloud` commands are easier to use

+
The `prefect cloud login` command no longer overwrites your current profile with a new API URL and auth key. Instead, the command will prompt you to create a new profile when logging into Prefect Cloud 2.0. Subsequent calls to prefect cloud login using the same key will simply "log in" to prefect cloud by switching to the profile associated with that authentication key.

-The new `prefect cloud workspace ls` command lists availible workspaces.
+The new `prefect cloud workspace ls` command lists available workspaces.

### Other improvements and bug fixes

+
- The anchor datetime (aka start datetime) for all newly created interval schedules will be the current date & time
- The `prefect orion start` command now handles keyboard interrupts
- CLI performance has been sped up 30-40% through improved import handling

@@ -2434,6 +7431,7 @@ The new `prefect cloud workspace ls` command lists availible workspaces.

## 2.0b13

### Improvements and bug fixes

+
- RRule schedule strings are now validated on initialization to confirm that the provided RRule strings are valid
- Concepts docs have been updated for clarity and consistency
- `IntervalSchedule`s now coerce naive datetimes to timezone-aware datetimes, so that interval schedules created with timezone-unaware datetimes will work (see the sketch below)
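A quick sketch of that last fix, assuming the beta-era `prefect.orion.schemas.schedules` module path and field names:

```python
from datetime import datetime, timedelta

from prefect.orion.schemas.schedules import IntervalSchedule

# A naive anchor_date like this is now coerced to a timezone-aware
# datetime instead of breaking the schedule.
schedule = IntervalSchedule(
    interval=timedelta(hours=1),
    anchor_date=datetime(2022, 1, 1),
)
```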
## 2.0b12 ### Work queue pages now display upcoming runs + A new "Upcoming runs" tab has been added to the work queue page, enabling you to see all of the runs that are eligible for that work queue before they are picked up by an agent. ### Other improvements and bug fixes + - You can now set a concurrency limit when creating a work queue via the CLI - In order to avoid unwittingly breaking references to shared blocks, block names are no longer editable - Getting started documentation has been updated and edited for clarity @@ -2454,9 +7454,11 @@ A new "Upcoming runs" tab has been added to the work queue page, enabling you to This release builds upon the collection of small enhancements made in the previous release. ### Default storage has been removed + For convenience, earlier versions of Prefect 2.0 allowed for a global storage setting. With forthcoming enhancements to blocks, this will no longer be necessary. ### Other improvements and bug fixes + - We have published a [guide for migrating workflows from Prefect 1.0 (and lower) to Prefect 2.0](https://orion-docs.prefect.io/migration_guide/) - The Flow run page now has a clearer empty state that is more consistent with other pages - Tutorial documentation has been further updated to reflect new result behavior @@ -2471,6 +7473,7 @@ For convenience, earlier versions of Prefect 2.0 allowed for a global storage se This release is the first of a series of smaller releases to be released daily. ### Improvements and bug fixes + - The Blocks selection page now includes more complete and consistent metadata about each block type, including block icons, descriptions, and examples - We've added a new [CLI style guide](https://github.com/PrefectHQ/prefect/blob/orion/docs/contributing/style.md#command-line-interface-cli-output-messages) for contributors - Work queues no longer filter on flow runner types, this capability will instead be achieved through tags @@ -2483,11 +7486,13 @@ Big things are in the works for Prefect 2! This release includes breaking change **With next week's release on July 27th, Prefect 2 will become the default package installed with `pip install prefect`. Flows written with Prefect 1 will require modifications to run with Prefect 2**. Please ensure that your package management process enables you to make the transition when the time is right for you. ### Code as workflows -As Prefect 2 usage has grown, we've observed a pattern among users, especially folks that were not previously users of Prefect 1. Working with Prefect was so much like working in native Python, users were often surprised that their tasks returned futures and states, Prefect objects, rather than results, the data that their Python functions were handling. This led to unfamiliar, potentially intimidating, errors in some cases. With this release, Prefect moves one step closer to code as workflows - tasks now return the results of their functions, rather than their states, by default. This means that you can truly take most native Python scripts, add the relevant @flow and @task decorators, and start running that script as a flow, benefitting from the observability and resilience that Prefect provides. + +As Prefect 2 usage has grown, we've observed a pattern among users, especially folks that were not previously users of Prefect 1. Working with Prefect was so much like working in native Python, users were often surprised that their tasks returned futures and states, Prefect objects, rather than results, the data that their Python functions were handling. 
This led to unfamiliar, potentially intimidating, errors in some cases. With this release, Prefect moves one step closer to code as workflows — tasks now return the results of their functions, rather than their states, by default. This means that you can truly take most native Python scripts, add the relevant @flow and @task decorators, and start running that script as a flow, benefitting from the observability and resilience that Prefect provides.

States and futures are still important concepts in dictating and understanding the behavior of flows. You will still be able to easily access and use them with the `.submit()` method. You will need to modify tasks in existing Prefect 2 flows to use this method to continue working as before.

### Other improvements and bug fixes

+
- A new `Secret` block can store a string that is encrypted at rest as well as obfuscated in logs and the UI
- Date filters on the flow run page in the UI now support filtering by date _and_ time
- Each work queue page in the UI now includes a command to start a corresponding agent

@@ -2504,11 +7509,13 @@ This is our biggest release yet! It's full of exciting new features and refineme

This release removes the deprecated `DaskTaskRunner` and `RayTaskRunner` from the core library, breaking existing references to them. You can find them in their respective collections [prefect-ray](https://prefecthq.github.io/prefect-ray/) and [prefect-dask](https://prefecthq.github.io/prefect-dask). It also removes the previously deprecated restart policy for the `KubernetesFlowRunner`.

Most importantly, there are new **breaking changes** to the Deployments interface described below.

### Flow Run Retries

-Flow run retries have been one of our most requested features, especially given how easy it is to run a flow as a "subflow" or "child flow" with Prefect 2.0. Flow run retries are configured just as task retries are - with the `retries` and `retry_delay_seconds` parameters.
+Flow run retries have been one of our most requested features, especially given how easy it is to run a flow as a "subflow" or "child flow" with Prefect 2.0. Flow run retries are configured just as task retries are — with the `retries` and `retry_delay_seconds` parameters.

If both a task and its flow have retries configured, tasks within the flow will retry up to their specified task retry limit for each flow run. For example, suppose you have a **flow** configured with a limit of 2 retries (up to 3 total runs, including the initial attempt) and a **task** in the flow configured with 3 retries (up to 4 attempts per flow run, including the initial attempt). The task could then run up to a total of 12 attempts, since task retry limits are reset after each flow run or flow run attempt.
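In code, that configuration looks like the following sketch; the flow and task bodies are hypothetical, and the `retries`/`retry_delay_seconds` parameters are the ones named above:

```python
from prefect import flow, task

@task(retries=3, retry_delay_seconds=10)
def fragile_task():
    ...

@flow(retries=2, retry_delay_seconds=60)
def resilient_flow():
    # The task retries up to 3 times per flow run attempt; the flow run
    # itself retries up to 2 times.
    fragile_task()
```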
### Notifications

+
At any time, you can visit the Prefect UI to get a comprehensive view of the state of all of your flows, but when something goes wrong with one of them, you need that information immediately. Prefect 2.0’s new notifications can alert you and your team when any flow enters any state you specify, with or without specific tags.

To create a notification, go to the new Notifications page via the sidebar navigation and select “Create Notification.” Notifications are structured just as you would describe them to someone. For example, if I want to get a Slack message every time my daily-ETL flow fails, my notification will simply read:

@@ -2522,7 +7529,9 @@ When the conditions of the notification are triggered, you’ll receive a simple

Currently, notifications can only be sent to a [Slack webhook](https://api.slack.com/messaging/webhooks) (or email addresses if you are using [Prefect Cloud 2.0](https://app.prefect.cloud)). Over time, notifications will support additional messaging services. Let us know which messaging services you’d like to send your notifications to!

### Flow packaging and deployment

+
We've revisited our flow packaging and deployment UX, making it both more powerful and easier to use. `DeploymentSpec`s are now just `Deployment`s. Most of the fields are unchanged, but there are a few differences:

+
- The `flow_storage` field has been replaced with a `packager` field.
- The `flow_location`, `flow_name`, and `flow` parameters are now just `flow`.

@@ -2543,27 +7552,31 @@ Learn more in the [Deployment concept documentation](https://docs.prefect.io/con

You can continue to use your existing `DeploymentSpec`s, but they are deprecated and will be removed in the coming weeks.

### Blocks

-We've been working on Blocks behind the scenes for a while. Whether you know it or not, if you've used the past few releases, you've used them. Blocks enable you to securely store configuration with the Prefect Orion server and access it from your code later with just a simple reference. Think of Blocks as secure, UI-editable, type-checked environment variables. We're starting with just a few Blocks - mostly storage, but over time we’ll expand this pattern to include every tool and service in the growing modern data stack. You'll be able to set up access to your entire stack once in just a few minutes, then manage access forever without editing your code. In particular, we've made the following enhancements:
+
+We've been working on Blocks behind the scenes for a while. Whether you know it or not, if you've used the past few releases, you've used them. Blocks enable you to securely store configuration with the Prefect Orion server and access it from your code later with just a simple reference. Think of Blocks as secure, UI-editable, type-checked environment variables. We're starting with just a few Blocks — mostly storage, but over time we’ll expand this pattern to include every tool and service in the growing modern data stack. You'll be able to set up access to your entire stack once in just a few minutes, then manage access forever without editing your code. In particular, we've made the following enhancements:

- Block document values can now be updated via the Python client with the `overwrite` flag (see the sketch below).
- Blocks now support secret fields. By default, fields identified as secret will be obfuscated when returned to the Prefect UI. The actual values can still be retrieved as necessary.
-- `BlockSchema` objects have a new `secret_fields: List[str]` item in their schema's extra fields. This is a list of all fields that should be considered "secret". It also includes any secret fields from nested blocks referenced by the schema.
+- `BlockSchema` objects have a new `secret_fields: List[str]` item in their schema's extra fields. This is a list of all fields that should be considered "secret". It also includes any secret fields from nested blocks referenced by the schema.
- You can now browse your Blocks on the new "Blocks" page, create, and edit them right in the UI.
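A hedged sketch of those two enhancements together; the block class, names, and values are hypothetical, and the `save`/`load` calls assume the Block interface of later 2.x releases:

```python
from pydantic import SecretStr

from prefect.blocks.core import Block

class ExampleCredentials(Block):
    username: str
    password: SecretStr  # secret field: obfuscated in the UI by default

# Saving again under the same name only succeeds because of overwrite=True.
ExampleCredentials(username="admin", password="hunter2").save("prod", overwrite=True)
creds = ExampleCredentials.load("prod")
```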
### Other Improvements

+
- Task keys, previously a concatenation of several pieces of metadata, are now only the qualified function name. While it is likely to be globally unique, the key can be used to easily identify every instance in which a function of the same name is utilized.
- Tasks now have a `version` that you can set via the task decorator, like the flow version identifier on flow runs.
- An Orion setting, `PREFECT_ORION_DATABASE_PASSWORD`, has been added to allow templating in the database connection URL
- A link to API reference documentation has been added to the Orion startup message.
- Where possible, Prefect 2.0 now exits processes earlier for synchronous flow or task runs that are cancelled. This reduces the range of conditions under which a task run would be marked failed, but continue to run.
- All Prefect client models now allow extras, while the API continues to forbid them, such that older Prefect 2.0 clients can receive and load objects from the API that have additional fields, facilitating backwards compatibility.
-- The _all_ attribute has been added to __init__.py for all public modules, declaring the public API for export.
+- The `__all__` attribute has been added to `__init__.py` for all public modules, declaring the public API for export.
- A new endpoint, `/deployments/{id}/work_queue_check`, enables you to check which work queues the scheduled runs of a deployment will be eligible for.
-
### Bug fixes

+
- Attempting to create a schedule with a cron string that includes a "random" or "hashed" expression will now return an error.

### Contributors

+
- [Cole Murray](https://github.com/ColeMurray)
- [Oliver Mannion](https://github.com/tekumara)
- [Steve Flitcroft](https://github.com/redsquare)

@@ -2574,6 +7587,7 @@ We've been working on Blocks behind the scenes for a while. Whether you know it

This release includes a number of important improvements and bug fixes in response to continued feedback from the community. Note that this release makes a **breaking change** to the Blocks API, making the `2.0b7` Orion server incompatible with previous Orion client versions.

### Improvements

+
- Added the color select to the Orion UI in OSS, enabling users to change their state color scheme.
- Added anonymous blocks, allowing Prefect to dynamically store blocks for you without cluttering your workspace.
- Performance improvements to the service that marks flow runs as late.
- Improved task naming conventions for tasks defined using lambda functions

### Documentation improvements

+
- Updated screenshots and description of workflows to reflect new UI
- Revised and extended Prefect Cloud quickstart tutorial
- Added deployments page
- Added documentation for `prefect cloud workspace set` command

### Collections

+
- [prefect-sqlalchemy](https://prefecthq.github.io/prefect-sqlalchemy/)
- [prefect-dask](https://prefecthq.github.io/prefect-dask/)
- [prefect-ray](https://prefecthq.github.io/prefect-ray/)

@@ -2600,6 +7616,7 @@

Note that the Dask and Ray task runners have been moved out of the Prefect core library to reduce the number of dependencies we require for most use cases. Install from the command line with `pip install prefect-dask` and import with `from prefect_dask.task_runners import DaskTaskRunner`.
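Using the documented import, attaching the runner to a flow then looks like this (the flow itself is a hypothetical example):

```python
from prefect import flow
from prefect_dask.task_runners import DaskTaskRunner

# Tasks submitted within this flow are scheduled on Dask.
@flow(task_runner=DaskTaskRunner())
def parallel_flow():
    ...
```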
### Bug fixes + - [Allow Orion UI to run on Windows](https://github.com/PrefectHQ/prefect/pull/5802) - Fixed a bug in terminal state data handling that caused timeouts - Disabled flow execution during deployment creation to prevent accidental execution. @@ -2627,10 +7644,10 @@ Note that this release makes a **breaking change** to the Blocks API, making the After the upgrade your data will remain intact, but you will need to upgrade to `2.0b6` to continue using the Cloud 2.0 API. You can upgrade in just a few simple steps: -- Install the latest Prefect 2.0 python package: `pip install -U "prefect>=2.0b6"` -- Restart any existing agent processes - - If you are using an agent running on Kubernetes, update the Prefect image version to `2.0b6` in your Kubernetes manifest and re-apply the deployment. - - You don't need to recreate any deployments or pause any schedules - stopping your agent process to perform an upgrade may result in some Late Runs, but those will be picked up once you restart your agent. +- Install the latest Prefect 2.0 python package: `pip install -U "prefect>=2.0b6"` +- Restart any existing agent processes + - If you are using an agent running on Kubernetes, update the Prefect image version to `2.0b6` in your Kubernetes manifest and re-apply the deployment. + - You don't need to recreate any deployments or pause any schedules — stopping your agent process to perform an upgrade may result in some Late Runs, but those will be picked up once you restart your agent. ## 2.0b5 @@ -2652,7 +7669,6 @@ While most of the development of Prefect 2.0 is still happening internally, we'r - @mkarbo - @AlessandroLollo - ### Flow and task runners - Flow runners now pass all altered settings to their jobs instead of just the API key and URL @@ -2752,7 +7768,6 @@ You might not see these fixes in your day-to-day, but we're dedicated to improvi ## 2.0b2 - ### Improvements - Docker flow runners can connect to local API applications on Linux without binding to `0.0.0.0`. @@ -2760,7 +7775,7 @@ You might not see these fixes in your day-to-day, but we're dedicated to improvi ### Bug fixes -- The CLI no longer displays tracebacks on sucessful exit. +- The CLI no longer displays tracebacks on successful exit. - Returning pandas objects from tasks does not error. - Flows are listed correctly in the UI dashboard. @@ -2768,9 +7783,10 @@ You might not see these fixes in your day-to-day, but we're dedicated to improvi We are excited to introduce this branch as [Prefect 2.0](https://www.prefect.io/blog/introducing-prefect-2-0/), powered by [Orion, our second-generation orchestration engine](https://www.prefect.io/blog/announcing-prefect-orion/)! We will continue to develop Prefect 2.0 on this branch. Both the Orion engine and Prefect 2.0 as a whole will remain under active development in beta for the next several months, with a number of major features yet to come. -This is the first release that's compatible with Prefect Cloud 2.0's beta API - more exciting news to come on that soon! +This is the first release that's compatible with Prefect Cloud 2.0's beta API — more exciting news to come on that soon! ### Expanded UI + Through our technical preview phase, our focus has been on establishing the right [concepts](https://docs.prefect.io/concepts/overview/) and making them accessible through the CLI and API. Now that some of those concepts have matured, we've made them more accessible and tangible through UI representations. 
This release adds some very important concepts to the UI:

**Flows and deployments**

@@ -2782,6 +7798,7 @@ If you've ever created a deployment without a schedule, you know it can be diff

With the [2.0a13 release](https://github.com/PrefectHQ/prefect/blob/orion/RELEASE-NOTES.md#work-queues), we introduced [work queues](https://docs.prefect.io/concepts/work-queues/), which could only be created through the CLI. Now, you can create and edit work queues directly from the UI, then copy, paste, and run a command that starts an agent that pulls work from that queue.

### Collections

+
Prefect Collections are groupings of pre-built tasks and flows used to quickly build data flows with Prefect. Collections are grouped around the services with which they interact. For example, to download data from an S3 bucket, you could use the `s3_download` task from the [prefect-aws collection](https://github.com/PrefectHQ/prefect-aws), or if you want to send a Slack message as part of your flow you could use the `send_message` task from the [prefect-slack collection](https://github.com/PrefectHQ/prefect-slack).

@@ -2809,6 +7826,7 @@ SETTING = "VALUE"

```

### Other enhancements

+
- It's now much easier to explore Prefect 2.0's major entities, including flows, deployments, flow runs, etc. through the CLI with the `ls` command, which produces consistent, beautifully stylized tables for each entity.
- Improved error handling for issues that the client commonly encounters, such as network errors, slow API requests, etc.
- The UI has been polished throughout to be sleeker, faster, and even more intuitive.

@@ -2857,11 +7875,13 @@ We've also rehauled our [settings reference](https://docs.prefect.io/api-ref/pre

## 2.0a12

### Filters

+
Orion captures valuable metadata about your flows, deployments, and their runs. We want it to be just as simple to retrieve this information as it is to record it. This release exposes a powerful set of filter operations to cut through this body of information with ease and precision. Want to see all of the runs of your Daily ETL flow? Now it's as easy as typing `flow:"Daily ETL"` into the filter bar. This update also includes a query builder UI, so you can utilize and learn these operators quickly and easily.

## 2.0a11

### Run Orion on Kubernetes

+
You can now run the Orion API, UI, and agent on Kubernetes. We've included a new Prefect CLI command, `prefect kubernetes manifest orion`, that you can use to automatically generate a manifest that runs Orion as a Kubernetes deployment.

Note: Prefect 2.0 beta versions prior to 2.0b6 used the CLI command `prefect orion kubernetes-manifest`.
@@ -2911,10 +7931,10 @@ The CLI has gotten some love with miscellaneous additions and refinements:

- Added `prefect --version` and `prefect -v` to expose version info
- Updated `prefect` to display `prefect --help`
- Enhanced `prefect dev` commands:
  - Added `prefect dev container` to start a container with local code mounted
  - Added `prefect dev build-image` to build a development image
  - Updated `prefect dev start` to hot-reload on API and agent code changes
  - Added `prefect dev api` and `prefect dev agent` to launch hot-reloading services individually

### Other enhancements

@@ -2928,7 +7948,7 @@

### Logs

-This release marks another major milestone on Orion's continued evolution into a production ready tool. Logs are fundamental output of any orchestrator. Orion's logs are designed to work exactly the way that you'd expect them to work. Our logger is built entirely on Python's [standard library logging configuration hooks](https://docs.python.org/3/library/logging.config.html), so you can easily output to JSON, write to files, set levels, and more - without Orion getting in the way. All logs are associated with a flow run ID. Where relevant, they are also associated with a task run ID.
+This release marks another major milestone on Orion's continued evolution into a production-ready tool. Logs are the fundamental output of any orchestrator. Orion's logs are designed to work exactly the way that you'd expect them to work. Our logger is built entirely on Python's [standard library logging configuration hooks](https://docs.python.org/3/library/logging.config.html), so you can easily output to JSON, write to files, set levels, and more — without Orion getting in the way. All logs are associated with a flow run ID. Where relevant, they are also associated with a task run ID.

Once you've run your flow, you can find the logs in a dedicated tab on the flow run page, where you can copy them all or one line at a time. You can even watch them come in as your flow run executes. Future releases will enable further filter options and log downloads. Learn more about logging in [the docs](https://docs.prefect.io/concepts/logs/).

@@ -2948,13 +7968,16 @@ This release adds pull policies to the `DockerFlowRunner` allowing full control

### Flow Runners

On the heels of the recent rename of Orion's `Executor` to `TaskRunner`, this release introduces `FlowRunner`, an analogous concept that specifies the infrastructure that a flow runs on. Just as a task runner can be specified for a flow, which encapsulates tasks, a flow runner can be specified for a deployment, which encapsulates a flow. This release includes two flow runners, which we expect to be the most commonly used:

-- **SubprocessFlowRunner** - The subprocess flow runner is the default flow runner. It allows for specification of a runtime Python environment with `virtualenv` and `conda` support.
-- **DockerFlowRunner** - Executes the flow run in a Docker container. The image, volumes, labels, and networks can be customized. From this release on, Docker images for use with this flow runner will be published with each release.
+
+- **SubprocessFlowRunner** — The subprocess flow runner is the default flow runner. It allows for specification of a runtime Python environment with `virtualenv` and `conda` support.
+- **DockerFlowRunner** — Executes the flow run in a Docker container. The image, volumes, labels, and networks can be customized (see the sketch below). From this release on, Docker images for use with this flow runner will be published with each release.
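As a rough sketch of customizing the Docker runner (the beta-era `prefect.flow_runners` import path and the specific values are assumptions):

```python
from prefect.flow_runners import DockerFlowRunner

# Image, volumes, labels, and networks are the customizable knobs named
# above; the values here are placeholders.
runner = DockerFlowRunner(
    image="prefecthq/prefect:latest",
    networks=["my-network"],
)
```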
+
+- **SubprocessFlowRunner** — The subprocess flow runner is the default flow runner. It allows for specification of a runtime Python environment with `virtualenv` and `conda` support.
+- **DockerFlowRunner** — Executes the flow run in a Docker container. The image, volumes, labels, and networks can be customized. From this release on, Docker images for use with this flow runner will be published with each release.

Future releases will introduce runners for executing flows on Kubernetes and major cloud platforms' container compute services (e.g., AWS ECS, Google Cloud Run).

### Other enhancements
+
In addition to flow runners, we added several other enhancements and resolved a few issues, including:
+
- Corrected git installation command in docs
- Refined UI through color, spacing, and alignment updates
- Resolved memory leak issues associated with the cache of session factories
@@ -2963,19 +7986,24 @@

## 2.0a6

### Subflows and Radar follow up
+
With the 2.0a5 release, we introduced the ability to navigate seamlessly between subflows and parent flows via Radar. In this release, we further enabled that ability by:
+
- Enabling the dedicated subflow runs tab on the Flow Run page
- Tracking of upstream inputs to subflow runs
- Adding a flow and task run count to all subflow run cards in the Radar view
- Adding a mini Radar view on the Flow run page

### Task Runners
+
Previous versions of Prefect could only trigger execution of code defined within tasks. Orion can trigger execution of significant code that can be run _outside of tasks_. In order to make the role previously played by Prefect's `Executor` more explicit, we have renamed `Executor` to `TaskRunner`. A related `FlowRunner` component is forthcoming.

### Other enhancements
+
In addition to task runners and subflow UI enhancements, we added several other enhancements and resolved a few issues, including:
+
- Introduced dependency injection pathways so that Orion's database access can be modified after import time
- Enabled the ability to copy the run ID from the flow run page
- Added additional metadata to the flow run page details panel
@@ -2986,12 +8014,15 @@

## 2.0a5

### Radar: A new way of visualizing workflows
+
Orion can orchestrate dynamic, DAG-free workflows. Task execution paths may not be known to Orion prior to a run—the graph “unfolds” as execution proceeds. Radar embraces this dynamism, giving users the clearest possible view of their workflows.

Orion’s Radar is based on a structured, radial canvas upon which tasks are rendered as they are orchestrated. The algorithm optimizes readability through consistent node placement and minimal edge crossings. Users can zoom and pan across the canvas to discover and inspect tasks of interest. The mini-map, edge tracing, and node selection tools make workflow inspection a breeze.

Radar also supports direct click-through to a subflow from its parent, enabling users to move seamlessly between task execution graphs.
### Other enhancements
+
While our focus was on Radar, we also made several other material improvements to Orion, including:
+
- Added popovers to dashboard charts, so you can see the specific data that comprises each visualization
- Refactored the `OrionAgent` as a fully client-side construct
- Enabled custom policies through dependency injection at runtime into Orion via context managers
@@ -3016,7 +8047,6 @@ In this release of Orion, we've reached feature parity with the existing Dask ex

You can [create customizable temporary clusters](https://docs.prefect.io/tutorials/dask-task-runner/) and [connect to existing Dask clusters](https://docs.prefect.io/tutorials/dask-task-runner/). Additionally, because flows are not statically registered, we're able to easily expose Dask annotations, which allow you to [specify fine-grained controls over the scheduling of your tasks](https://docs.prefect.io/tutorials/dask-task-runner/) within Dask.

-
### Subflow executors

[Subflow runs](https://docs.prefect.io/concepts/flows/#composing-flows) are a first-class concept in Orion and this enables new execution patterns.

This pattern can be nested or reused multiple times, enabling groups of tasks to use their own configuration.

Check out our [multiple executor documentation](https://docs.prefect.io/concepts/executors/#using-multiple-task-runners) for an example.

-
### Other enhancements

While we're excited to talk about these new features, we're always hard at work fixing bugs and improving performance. This release also includes:

- Updates to database engine disposal to support large, ephemeral server flow runs
- Improvements and additions to the `flow-run` and `deployment` command-line interfaces
  - `prefect deployment ls`
  - `prefect deployment inspect <name>`
  - `prefect flow-run inspect <id>`
  - `prefect flow-run ls`
- Clarification of existing documentation and additional new documentation
- Fixes for database creation and startup issues

diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000000..8166c473216a
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,5 @@
+# How to report a security issue
+
+To report a (suspected) security issue, please email `bugbounty@prefect.io` and follow the instructions for our [bug bounty program](https://www.prefect.io/bug-bounty).
+
+Prefect will acknowledge receipt of your report in a timely manner, usually within 48 hours. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
diff --git a/benches/__main__.py b/benches/__main__.py
index 38d5164bc12b..ff7375fce135 100644
--- a/benches/__main__.py
+++ b/benches/__main__.py
@@ -19,6 +19,7 @@
     [
         "pytest",
         "--no-cov",
+        "--timeout=180",  # TODO: These should be overridable
         "--benchmark-group-by=func",
         "--benchmark-columns=mean,stddev,min,max,rounds",
diff --git a/benches/bench_cli.py b/benches/bench_cli.py
new file mode 100644
index 000000000000..6ddc6783e274
--- /dev/null
+++ b/benches/bench_cli.py
@@ -0,0 +1,23 @@
+import subprocess
+
+from pytest_benchmark.fixture import BenchmarkFixture
+
+
+def bench_prefect_help(benchmark: BenchmarkFixture):
+    benchmark.pedantic(subprocess.check_call, args=(["prefect", "--help"],), rounds=3)
+
+
+def bench_prefect_version(benchmark: BenchmarkFixture):
+    benchmark.pedantic(subprocess.check_call, args=(["prefect", "version"],), rounds=3)
+
+
+def bench_prefect_short_version(benchmark: BenchmarkFixture):
+    benchmark.pedantic(
+        subprocess.check_call, args=(["prefect", "--version"],), rounds=3
+    )
+
+
+def bench_prefect_profile_ls(benchmark: BenchmarkFixture):
+    benchmark.pedantic(
+        subprocess.check_call, args=(["prefect", "profile", "ls"],), rounds=3
+    )
diff --git a/benches/bench_flows.py b/benches/bench_flows.py
index b4247092d438..67d8bfe89518 100644
--- a/benches/bench_flows.py
+++ b/benches/bench_flows.py
@@ -1,7 +1,6 @@
 """
 TODO: Add benches for higher number of tasks; blocked by engine deadlocks in CI.
 """
-import copy

 import anyio
 import pytest
@@ -127,7 +126,6 @@ def bench_async_flow_with_concurrent_subflows(
     async def benchmark_flow():
         async with anyio.create_task_group() as tg:
             for _ in range(num_flows):
-                # A copy is needed to avoid duplicate task runner starts
-                tg.start_soon(copy.deepcopy(test_flow))
+                tg.start_soon(test_flow)

     benchmark(anyio.run, benchmark_flow)
diff --git a/benches/bench_import.py b/benches/bench_import.py
new file mode 100644
index 000000000000..5b533a112359
--- /dev/null
+++ b/benches/bench_import.py
@@ -0,0 +1,9 @@
+import subprocess
+
+from pytest_benchmark.fixture import BenchmarkFixture
+
+
+def bench_import_prefect(benchmark: BenchmarkFixture):
+    benchmark.pedantic(
+        subprocess.check_call, args=(["python", "-c", "import prefect"],), rounds=5
+    )
diff --git a/benches/conftest.py b/benches/conftest.py
index 8d96f9f3ce05..386e4ce98723 100644
--- a/benches/conftest.py
+++ b/benches/conftest.py
@@ -1,4 +1,25 @@
+import traceback
+
 import pytest
+import pytest_benchmark.plugin
+
+_handle_saving = pytest_benchmark.session.BenchmarkSession.handle_saving
+
+
+@pytest.hookimpl(hookwrapper=True)
+def handle_saving(*args, **kwargs):
+    """
+    Patches pytest-benchmark's save handler to avoid raising exceptions on failure.
+    An upstream bug causes benchmark JSON generation to fail when tests fail.
+    """
+    try:
+        return _handle_saving(*args, **kwargs)
+    except Exception:
+        print("Failed to save benchmark results:")
+        traceback.print_exc()
+
+
+pytest_benchmark.session.BenchmarkSession.handle_saving = handle_saving


 @pytest.fixture(autouse=True)
diff --git a/client/INFO.md b/client/INFO.md
new file mode 100644
index 000000000000..ea8bdc6391c5
--- /dev/null
+++ b/client/INFO.md
@@ -0,0 +1,29 @@
+# Overview
+
+This directory contains files for building and publishing the `prefect-client`
+library. `prefect-client` is built by removing source code from `prefect` and
+packaging its own `requirements.txt` and `setup.py`.
This process can happen
+in one of three ways:
+
+- automatically whenever a PR is created (see
+`.github/workflows/prefect-client.yaml`)
+- automatically whenever a GitHub release is published (see
+`.github/workflows/prefect-client-publish.yaml`)
+- manually by running the `client/build_client.sh` script locally
+
+Note that whenever a GitHub release is published, `prefect-client` will
+not only be built but will also be distributed to PyPI. `prefect-client`
+releases will have the same versioning as `prefect` - only the package names
+will be different.
+
+This directory also includes a "minimal" flow that is used for smoke
+tests to ensure that the built `prefect-client` is functional.
+
+In general, these builds, smoke tests, and publish steps should be transparent.
+If these automated steps fail, use the `client/build_client.sh` script to run
+the build and smoke test locally and iterate on a fix. The failures will likely
+be from:
+
+- including a new dependency that is not installed in `prefect-client`
+- re-arranging or adding files in such a way that a necessary file is removed at
+  build time
diff --git a/client/README.md b/client/README.md
new file mode 100644
index 000000000000..5ebdb367d78d
--- /dev/null
+++ b/client/README.md
@@ -0,0 +1,109 @@
+



+
+# prefect-client
+
+The `prefect-client` package is a minimal installation of `prefect`, designed for interacting with Prefect Cloud
+or any remote `prefect` server. It sheds some functionality and dependencies in exchange for a smaller installation
+size, making it ideal for use in lightweight or ephemeral environments such as AWS Lambda functions or other
+resource-constrained settings.
+
+
+## Getting started
+
+`prefect-client` shares the same installation requirements as `prefect`. To install, make sure you are on Python 3.9 or
+later and run the following command:
+
+```bash
+pip install prefect-client
+```
+
+Next, ensure that your `prefect-client` has access to a remote `prefect` server by exporting the `PREFECT_API_KEY`
+(if using Prefect Cloud) and `PREFECT_API_URL` environment variables. Once those are set, use the package in your code as
+you would normally use `prefect`!
+
+
+For example, to remotely trigger a run of a deployment:
+
+```python
+from prefect.deployments import run_deployment
+
+
+def my_lambda(event):
+    ...
+    run_deployment(
+        name="my-flow/my-deployment",
+        parameters={"foo": "bar"},
+        timeout=0,
+    )
+
+my_lambda({})
+```
+
+To emit events in an event-driven system:
+
+```python
+from prefect.events import emit_event
+
+
+def something_happened():
+    emit_event("my-event", resource={"prefect.resource.id": "foo.bar"})
+
+something_happened()
+```
+
+
+Or just interact with a `prefect` API:
+```python
+import asyncio
+
+from prefect.client.orchestration import get_client
+
+
+async def query_api():
+    async with get_client() as client:
+        limits = await client.read_concurrency_limits(limit=10, offset=0)
+        print(limits)
+
+
+asyncio.run(query_api())
+```
+
+
+## Known limitations
+By design, `prefect-client` omits all CLI and server components. This means that the CLI is not available for use
+and attempts to access server objects will fail. Furthermore, some classes, methods, and objects may be available
+for import in `prefect-client` but may not be "runnable" if they tap into server-oriented functionality. If you
+encounter such a limitation, feel free to [open an issue](https://github.com/PrefectHQ/prefect/issues/new/choose)
+describing the functionality you are interested in using and we will do our best to make it available.
+
+
+## Next steps
+
+There's lots more you can do to orchestrate and observe your workflows with Prefect!
+Start with our [friendly tutorial](https://docs.prefect.io/tutorials) or explore the [core concepts of Prefect workflows](https://docs.prefect.io/concepts/).
+
+## Join the community
+
+Prefect is made possible by the fastest-growing community of thousands of friendly data engineers. Join us in building a new kind of workflow system. The [Prefect Slack community](https://prefect.io/slack) is a fantastic place to learn more about Prefect, ask questions, or get help with workflow design. All community forums, including code contributions, issue discussions, and Slack messages are subject to our [Code of Conduct](https://discourse.prefect.io/faq).
+
+## Contribute
+
+See our [documentation on contributing to Prefect](https://docs.prefect.io/contributing/overview/).
+ +Thanks for being part of the mission to build a new kind of workflow system and, of course, **happy engineering!** diff --git a/client/build_client.sh b/client/build_client.sh new file mode 100755 index 000000000000..12de828a03b4 --- /dev/null +++ b/client/build_client.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +CWD=$(pwd) + +# if running in GH Actions, this will already be set +if [ -z ${TMPDIR+x} ]; + then + TMPDIR=$(mktemp -d); + echo "Using workspace at $TMPDIR"; + else echo "Using workspace at $TMPDIR"; +fi + +# init the workspace +cp -rf ./ $TMPDIR +cd $TMPDIR/src/prefect + +# delete the files we don't need +rm -rf cli/ +rm -rf deployments/recipes/ +rm -rf deployments/templates +rm -rf server/__init__.py +find ./server \ + -not -path "./server" \ + -not -path "./server/api" \ + -not -path "./server/api/*" \ + -delete +rm -rf server/database +rm -rf server/models +rm -rf server/orchestration +rm -rf server/schemas +rm -rf server/services +rm -rf testing +rm -rf server/utilities + +# replace old build files with client build files +cd $TMPDIR +cp client/setup.py . +cp client/README.md . + +# if running in GH Actions, this happens in external workflow steps +# this is a convenience to simulate the full build locally +if [ -z ${CI} ]; + then + if [[ -z "${PREFECT_API_KEY}" ]] || [[ -z "${PREFECT_API_URL}" ]]; then + echo "In order to run smoke tests locally, PREFECT_API_KEY and"\ + "PREFECT_API_URL must be set and valid for a Prefect Cloud account."; + exit 1; + fi + python -m venv venv; + source venv/bin/activate; + pip install wheel; + python setup.py sdist bdist_wheel; + pip install dist/*.tar.gz; + python client/client_flow.py; + echo "Build and smoke test completed successfully. Final results:"; + echo "$(du -sh $VIRTUAL_ENV)"; + deactivate; + else echo "Skipping local build"; +fi + +cd $CWD diff --git a/client/client_flow.py b/client/client_flow.py new file mode 100644 index 000000000000..429a4f950766 --- /dev/null +++ b/client/client_flow.py @@ -0,0 +1,32 @@ +from prefect import flow, task +from prefect.concurrency import asyncio, events, services, sync # noqa: F401 + + +def skip_remote_run(): + """ + Github Actions will not populate secrets if the workflow is triggered by + external collaborators (including dependabot). 
This function checks if + we're in a CI environment AND if the secret was not populated -- if + those conditions are true, we won't try to run the flow against the remote + API + """ + import os + + in_gha = os.environ.get("CI", False) + secret_not_set = os.environ.get("PREFECT_API_KEY", "") == "" + return in_gha and secret_not_set + + +@task +def smoke_test_task(*args, **kwargs): + print(args, kwargs) + + +@flow +def smoke_test_flow(): + smoke_test_task("foo", "bar", baz="qux") + + +if __name__ == "__main__": + if not skip_remote_run(): + smoke_test_flow() diff --git a/client/setup.py b/client/setup.py new file mode 100644 index 000000000000..cbef604e49f7 --- /dev/null +++ b/client/setup.py @@ -0,0 +1,47 @@ +import versioneer +from setuptools import find_packages, setup + +install_requires = open("requirements-client.txt").read().strip().split("\n") + +# grab and use the first three version digits (the generated tag) +_version = versioneer.get_version().split(".") +client_version = ".".join(_version[:3]).split("+")[0] + +setup( + # Package metadata + name="prefect-client", + description="Workflow orchestration and management.", + author="Prefect Technologies, Inc.", + author_email="help@prefect.io", + url="https://www.prefect.io", + project_urls={ + "Changelog": "https://github.com/PrefectHQ/prefect/blob/main/RELEASE-NOTES.md", + "Documentation": "https://docs.prefect.io", + "Source": "https://github.com/PrefectHQ/prefect", + "Tracker": "https://github.com/PrefectHQ/prefect/issues", + }, + long_description=open("README.md").read(), + long_description_content_type="text/markdown", + # Versioning + version=client_version, + # Package setup + packages=find_packages(where="src"), + package_dir={"": "src"}, + include_package_data=True, + # Requirements + python_requires=">=3.9", + install_requires=install_requires, + extras_require={"notifications": ["apprise>=1.1.0, <2.0.0"]}, + classifiers=[ + "Natural Language :: English", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development :: Libraries", + ], +) diff --git a/compat-tests b/compat-tests new file mode 160000 index 000000000000..3c5ec0111e2a --- /dev/null +++ b/compat-tests @@ -0,0 +1 @@ +Subproject commit 3c5ec0111e2aa7b160f2b21cfd383d19448dfe13 diff --git a/docker-compose.yml b/docker-compose.yml index 1a27a80ff29c..2e3ffb03727b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,6 @@ -version: "3.8" services: test-db: - image: postgres:13 + image: postgres:14 ports: - 15432:5432 environment: @@ -14,3 +13,12 @@ services: LC_COLLATE: 'C.UTF-8' LC_CTYPE: 'C.UTF-8' tmpfs: /var/lib/postgresql/data + command: + - postgres + - -c + - max_connections=250 + registry: + image: registry:2 + container_name: prefect-test-registry + ports: + - "5555:5000" diff --git a/docs/2.19.x/api-reference/overview.mdx b/docs/2.19.x/api-reference/overview.mdx new file mode 100644 index 000000000000..ae356724fa85 --- /dev/null +++ b/docs/2.19.x/api-reference/overview.mdx @@ -0,0 +1,19 @@ +--- +title: API Reference +sidebarTitle: Overview +--- + +Prefect auto-generates reference documentation for the following components: + +* **[Prefect Python SDK](https://docs.prefect.io/api-ref/python/)**: used to build, test, 
and execute workflows.
+* **[Prefect REST API](https://docs.prefect.io/api-ref/rest-api/)**: used by both workflow clients and the Prefect UI for orchestration and data retrieval.
+* Prefect Cloud REST API documentation is available at [https://app.prefect.cloud/api/docs](https://app.prefect.cloud/api/docs).
+* The REST API documentation for a locally hosted open-source Prefect server is available in the [Prefect REST API Reference](https://docs.prefect.io/api-ref/rest-api-reference/).
+* **[Prefect Server SDK](https://docs.prefect.io/api-ref/server/)**: used primarily by the server to work with workflow metadata and enforce orchestration logic. This is only used directly by Prefect developers and contributors.
+
+
+**Self-hosted docs**
+
+
+When self-hosting, you can access REST API documentation at the `/docs` endpoint of your [`PREFECT_API_URL`](https://docs.prefect.io/concepts/settings/#prefect_api_url) - for example, if you ran `prefect server start` with no additional configuration you can find this reference at [http://localhost:4200/docs](http://localhost:4200/docs).
+
\ No newline at end of file
diff --git a/docs/2.19.x/api-reference/python-sdk/overview.mdx b/docs/2.19.x/api-reference/python-sdk/overview.mdx
new file mode 100644
index 000000000000..887bced2be8d
--- /dev/null
+++ b/docs/2.19.x/api-reference/python-sdk/overview.mdx
@@ -0,0 +1,9 @@
+---
+title: Python SDK
+sidebarTitle: Overview
+---
+The Prefect Python SDK is used to build, test, and execute workflows against the Prefect API.
+
+Explore the modules in the navigation bar to the left to learn more.
+
+
diff --git a/docs/2.19.x/cloud/api-rate-limits.mdx b/docs/2.19.x/cloud/api-rate-limits.mdx
new file mode 100644
index 000000000000..7889ede4beab
--- /dev/null
+++ b/docs/2.19.x/cloud/api-rate-limits.mdx
@@ -0,0 +1,60 @@
+---
+sidebarTitle: API Rate Limits
+title: API Rate Limits & Retention Periods
+---
+
+
+API rate limits restrict the number of requests that a single client can make in a given time period. They ensure Prefect Cloud's stability, so that when you make an API call, you always get a response.
+
+
+**Prefect Cloud rate limits are subject to change**
+
+The following rate limits are currently in effect but are subject to change. Contact Prefect support at [help@prefect.io](mailto:help@prefect.io) if you have questions about current rate limits.
+
+
+Prefect Cloud enforces the following rate limits:
+
+* Flow and task creation rate limits
+* Log service rate limits
+
+Flow, flow run, and task run rate limits
+-----------------------------------------------------------------------------------------------------
+
+Prefect Cloud limits the `flow_runs`, `task_runs`, and `flows` endpoints and their subroutes at the following levels:
+
+* 400 per minute for personal accounts
+* 2,000 per minute for Pro accounts
+
+The Prefect Cloud API will return a `429` response with an appropriate `Retry-After` header if these limits are triggered.
+
+Log service rate limits
+---------------------------------------------------------------------
+
+Prefect Cloud limits the number of logs accepted:
+
+* 700 logs per minute for personal accounts
+* 10,000 logs per minute for Pro accounts
+
+The Prefect Cloud API will return a `429` response if these limits are triggered.
+
+Flow run retention
+-----------------------------------------------------------
+
+
+**Prefect Cloud feature**
+
+The Flow Run Retention Policy setting is only applicable in Prefect Cloud.
+
+Flow runs in Prefect Cloud are retained according to the Flow Run Retention Policy set by your account tier. The policy setting applies to all workspaces owned by the account.
+
+The flow run retention policy represents the number of days each flow run is available in the Prefect Cloud UI, and via the Prefect CLI and API, after it ends. Once a flow run reaches a terminal state ([detailed in the chart here](https://docs.prefect.io/concepts/states/#state-types)), it will be retained until the end of the flow run retention period.
+
+
+**Flow Run Retention Policy keys on terminal state**
+
+Note that, because the Flow Run Retention Policy keys on terminal state, if two flows start at the same time but reach a terminal state at different times, they will be removed at different times, according to when each reached its respective terminal state.
+
+
+This retention policy applies to all [details about a flow run](https://docs.prefect.io/ui/flow-runs/#inspect-a-flow-run), including its task runs. Subflow runs follow the retention policy independently from their parent flow runs, and are removed based on the time each subflow run reaches a terminal state.
+
+If you or your organization have needs that require a tailored retention period, [contact the Prefect Sales team](https://www.prefect.io/pricing).
\ No newline at end of file
diff --git a/docs/2.19.x/cloud/connection--troubleshooting.mdx b/docs/2.19.x/cloud/connection--troubleshooting.mdx
new file mode 100644
index 000000000000..f3488b7aeea8
--- /dev/null
+++ b/docs/2.19.x/cloud/connection--troubleshooting.mdx
@@ -0,0 +1,175 @@
+---
+title: Connecting & Troubleshooting
+---
+To create flow runs in a local or remote execution environment and use either Prefect Cloud or a Prefect server as the backend API server, you need to:
+
+* Configure the execution environment with the location of the API.
+* Authenticate with the API, either by logging in or providing a valid API key (Prefect Cloud only).
+
+Log into Prefect Cloud from a terminal
+---------------------------------------------------------------------------------------------------
+
+Configure a local execution environment to use Prefect Cloud as the API server for flow runs. In other words, "log in" to Prefect Cloud from a local environment where you want to run a flow.
+
+1. Open a new terminal session.
+2. [Install Prefect](https://docs.prefect.io/getting-started/installation/) in the environment in which you want to execute flow runs.
+3. Use the `prefect cloud login` Prefect CLI command to log into Prefect Cloud from your environment.
+
+The `prefect cloud login` command, used on its own, provides an interactive login experience. Using this command, you can log in with either an API key or through a browser.
+
+```
+$ prefect cloud login
+? How would you like to authenticate? [Use arrows to move; enter to select]
+> Log in with a web browser
+  Paste an API key
+Paste your authentication key:
+? Which workspace would you like to use? [Use arrows to move; enter to select]
+> prefect/terry-prefect-workspace
+  g-gadflow/g-workspace
+Authenticated with Prefect Cloud! Using workspace 'prefect/terry-prefect-workspace'.
+
+```
+
+
+You can also log in by providing a [Prefect Cloud API key](https://docs.prefect.io/2.19.1/cloud/users/api-keys/) that you create.
+
+### Change workspaces
+
+If you need to change which workspace you're syncing with, use the `prefect cloud workspace set` Prefect CLI command while logged in, passing the account handle and workspace name.
+ +``` +$ prefect cloud workspace set --workspace "prefect/my-workspace" + +``` + + +If no workspace is provided, you will be prompted to select one. + +**Workspace Settings** also shows you the `prefect cloud workspace set` Prefect CLI command you can use to sync a local execution environment with a given workspace. + +You may also use the `prefect cloud login` command with the `--workspace` or `-w` option to set the current workspace. + +``` +$ prefect cloud login --workspace "prefect/my-workspace" + +``` + + +Manually configure Prefect API settings +----------------------------------------------------------------------------------------------------- + +You can also manually configure the `PREFECT_API_URL` setting to specify the Prefect Cloud API. + +For Prefect Cloud, you can configure the `PREFECT_API_URL` and `PREFECT_API_KEY` settings to authenticate with Prefect Cloud by using an account ID, workspace ID, and API key. + +``` +$ prefect config set PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]" +$ prefect config set PREFECT_API_KEY="[API-KEY]" + +``` + + +When you're in a Prefect Cloud workspace, you can copy the `PREFECT_API_URL` value directly from the page URL. + +In this example, we configured `PREFECT_API_URL` and `PREFECT_API_KEY` in the default profile. You can use `prefect profile` CLI commands to create settings profiles for different configurations. For example, you could have a "cloud" profile configured to use the Prefect Cloud API URL and API key, and another "local" profile for local development using a local Prefect API server started with `prefect server start`. See [Settings](https://docs.prefect.io/concepts/settings/) for details. + + +**Environment variables** + +You can also set `PREFECT_API_URL` and `PREFECT_API_KEY` as you would any other environment variable. See [Overriding defaults with environment variables](https://docs.prefect.io/concepts/settings/#overriding-defaults-with-environment-variables) for more information. + + + + +See the [Flow orchestration with Prefect](https://docs.prefect.io/tutorial/flows/) tutorial for examples. + +Install requirements in execution environments +------------------------------------------------------------------------------------------------------------------- + +In local and remote execution environments — such as VMs and containers — you must make sure any flow requirements or dependencies have been installed before creating a flow run. + +Troubleshooting Prefect Cloud +--------------------------------------------------------------------------------- + +This section provides tips that may be helpful if you run into problems using Prefect Cloud. + +Prefect Cloud and proxies +------------------------------------------------------------------------- + +Proxies intermediate network requests between a server and a client. + +To communicate with Prefect Cloud, the Prefect client library makes HTTPS requests. These requests are made using the [`httpx`](https://www.python-httpx.org/) Python library. `httpx` respects accepted proxy environment variables, so the Prefect client is able to communicate through proxies. + +To enable communication via proxies, simply set the `HTTPS_PROXY` and `SSL_CERT_FILE` environment variables as appropriate in your execution environment and things should “just work.” + +See the [Using Prefect Cloud with proxies](https://discourse.prefect.io/t/using-prefect-cloud-with-proxies/1696) topic in Prefect Discourse for examples of proxy configuration. 
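For example, a minimal sketch (both values below are placeholders; substitute your proxy's URL and your CA bundle path):

```bash
# Placeholder values: replace with your environment's actual proxy URL
# and certificate bundle before use.
export HTTPS_PROXY="http://proxy.example.com:3128"
export SSL_CERT_FILE="/path/to/ca-bundle.pem"

# Any subsequent Prefect CLI command or flow run in this shell will now
# route its HTTPS traffic through the proxy.
prefect config view
```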
+
+URLs that should be whitelisted for outbound communication in a secure environment include the UI, the API, authentication, and the current OCSP server:
+
+* app.prefect.cloud
+* api.prefect.cloud
+* auth.workos.com
+* api.github.com
+* github.com
+* ocsp.pki.goog/s/gts1d4/OxYEb8XcYmo
+
+Prefect Cloud access via API
+-------------------------------------------------------------------------------
+
+If the Prefect Cloud API key, environment variable settings, or account login for your execution environment are not configured correctly, you may experience errors or unexpected flow run results when using Prefect CLI commands, running flows, or observing flow run results in Prefect Cloud.
+
+Use the `prefect config view` CLI command to make sure your execution environment is correctly configured to access Prefect Cloud.
+
+```
+$ prefect config view
+PREFECT_PROFILE='cloud'
+PREFECT_API_KEY='pnu_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' (from profile)
+PREFECT_API_URL='https://api.prefect.cloud/api/accounts/...' (from profile)
+
+```
+
+
+Make sure `PREFECT_API_URL` is configured to use `https://api.prefect.cloud/api/...`.
+
+Make sure `PREFECT_API_KEY` is configured to use a valid API key.
+
+You can use the `prefect cloud workspace ls` CLI command to view or set the active workspace.
+
+```
+$ prefect cloud workspace ls
+┏━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃  Available Workspaces:  ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│  g-gadflow/g-workspace  │
+│  * prefect/workinonit   │
+└─────────────────────────┘
+  * active workspace
+
+```
+
+
+You can also check that the account and workspace IDs specified in the URL for `PREFECT_API_URL` match those shown in the URL bar for your Prefect Cloud workspace.
+
+Prefect Cloud login errors
+---------------------------------------------------------------------------
+
+If you're having difficulty logging in to Prefect Cloud, the following troubleshooting steps may resolve the issue, or will provide more information when sharing your case with the support channel.
+
+* Are you logging into Prefect Cloud 2? Prefect Cloud 1 and Prefect Cloud 2 use separate accounts. Make sure to use the right Prefect Cloud 2 URL: [https://app.prefect.cloud/](https://app.prefect.cloud/)
+* Do you already have a Prefect Cloud account? If you’re having difficulty accepting an invitation, try creating an account first using the email associated with the invitation, then accept the invitation.
+* Are you using a single sign-on (SSO) provider, social authentication (Google, Microsoft, or GitHub), or just using an emailed link?
+
+Other tips to help with login difficulties:
+
+* Hard refresh your browser with Cmd+Shift+R.
+* Try in a different browser. We actively test against the following browsers:
+  * Chrome
+  * Edge
+  * Firefox
+  * Safari
+* Clear recent browser history/cookies
+
+None of this worked?
+
+Email us at [help@prefect.io](mailto:help@prefect.io) and provide answers to the questions above in your email to make it faster to troubleshoot and unblock you. Make sure you add the email address with which you were trying to log in, your Prefect Cloud account name, and, if applicable, the organization to which it belongs.
\ No newline at end of file
diff --git a/docs/2.19.x/cloud/events.mdx b/docs/2.19.x/cloud/events.mdx
new file mode 100644
index 000000000000..71373927b702
--- /dev/null
+++ b/docs/2.19.x/cloud/events.mdx
@@ -0,0 +1,137 @@
+---
+title: Events
+---
+
+An event is a notification of a change. Together, events form a feed of activity recording what's happening across your stack.
+
+Events power several features in Prefect Cloud, including flow run logs, audit logs, and automations.
+
+Events can represent API calls, state transitions, or changes in your execution environment or infrastructure.
+
+Events enable observability into your data stack via the [event feed](https://docs.prefect.io/ui/events/#event-feed), and the configuration of Prefect's reactivity via [automations](https://docs.prefect.io/concepts/automations/).
+
+![Prefect UI](/images/events1.png)
+
+Event specification
+-------------------------------------------------------------
+
+Events adhere to a structured [specification](https://app.prefect.cloud/api/docs#tag/Events).
+
+![Prefect UI](/images/events2.png)
+
+
+|Name    |Type  |Required?|Description                                                          |
+|--------|------|---------|---------------------------------------------------------------------|
+|occurred|String|yes      |When the event happened                                              |
+|event   |String|yes      |The name of the event that happened                                  |
+|resource|Object|yes      |The primary Resource this event concerns                             |
+|related |Array |no       |A list of additional Resources involved in this event                |
+|payload |Object|no       |An open-ended set of data describing what happened                   |
+|id      |String|yes      |The client-provided identifier of this event                         |
+|follows |String|no       |The ID of an event that is known to have occurred prior to this one. |
+
+
+Event grammar
+-------------------------------------------------
+
+Generally, events have a consistent and informative grammar - an event describes a resource and an action that the resource took or that was taken on that resource. For example, events emitted by Prefect objects take the form of:
+
+```
+prefect.block.write-method.called
+prefect-cloud.automation.action.executed
+prefect-cloud.user.logged-in
+
+```
+
+
+Event sources
+-------------------------------------------------
+
+Events are automatically emitted by all Prefect objects, including flows, tasks, deployments, work queues, and logs. Prefect-emitted events will contain the `prefect` or `prefect-cloud` resource prefix. Events can also be sent to the Prefect [events API](https://app.prefect.cloud/api/docs#tag/Events) via an authenticated HTTP request.
+
+### Emit custom events from Python code
+
+The Prefect Python SDK provides an `emit_event` function that emits a Prefect event when called. The function can be called inside or outside of a task or flow. Running the following code will emit an event to Prefect Cloud, which will validate and ingest the event data.
+
+```python
+from prefect.events import emit_event
+
+def some_function(name: str="kiki") -> None:
+    print(f"hi {name}!")
+    emit_event(event=f"{name}.sent.event!", resource={"prefect.resource.id": f"coder.{name}"})
+
+some_function()
+
+```
+
+
+Note that the `emit_event` arguments shown above are required: `event` represents the name of the event and `resource={"prefect.resource.id": "my_string"}` is the resource id. To get data into an event for use in an automation action, you can specify a dictionary of values for the `payload` parameter.
+
+### Emit events via webhooks
+
+Prefect Cloud offers [programmable webhooks](https://docs.prefect.io/guides/webhooks/) to receive HTTP requests from other systems and translate them into events within your workspace.
Webhooks can emit [pre-defined static events](https://docs.prefect.io/guides/webhooks/#static-webhook-events), dynamic events that [use portions of the incoming HTTP request](https://docs.prefect.io/guides/webhooks/#dynamic-webhook-events), or events derived from [CloudEvents](https://docs.prefect.io/guides/webhooks/#accepting-cloudevents).
+
+Events emitted from any source will appear in the event feed, where you can visualize activity in context and configure [automations](https://docs.prefect.io/concepts/automations/) to react to the presence or absence of it in the future.
+
+Resources
+-----------------------------------------
+
+Every event has a primary resource, which describes the object that emitted an event. Resources are used as quasi-stable identifiers for sources of events, and are constructed as dot-delimited strings, for example:
+
+```
+prefect-cloud.automation.5b9c5c3d-6ca0-48d0-8331-79f4b65385b3.action.0
+acme.user.kiki.elt_script_1
+prefect.flow-run.e3755d32-cec5-42ca-9bcd-af236e308ba6
+
+```
+
+
+Resources can optionally have additional arbitrary labels which can be used in event aggregation queries, such as:
+
+```json
+"resource": {
+    "prefect.resource.id": "prefect-cloud.automation.5b9c5c3d-6ca0-48d0-8331-79f4b65385b3",
+    "prefect-cloud.action.type": "call-webhook"
+    }
+
+```
+
+
+Events can optionally contain related resources, used to associate the event with other resources, such as in the case that the primary resource acted on or with another resource:
+
+```json
+"resource": {
+    "prefect.resource.id": "prefect-cloud.automation.5b9c5c3d-6ca0-48d0-8331-79f4b65385b3.action.0",
+    "prefect-cloud.action.type": "call-webhook"
+    },
+"related": [
+    {
+        "prefect.resource.id": "prefect-cloud.automation.5b9c5c3d-6ca0-48d0-8331-79f4b65385b3",
+        "prefect.resource.role": "automation",
+        "prefect-cloud.name": "webhook_body_demo",
+        "prefect-cloud.posture": "Reactive"
+    }
+]
+
+```
+
+
+Events in the Cloud UI
+-------------------------------------------------------------------
+
+Prefect Cloud provides an interactive dashboard to analyze and take action on events that occurred in your workspace on the event feed page.
+
+![Event feed](/images/events3.png)
+
+The event feed is the primary place to view, search, and filter events to understand activity across your stack. Each entry displays data on the resource, related resource, and event that took place.
+
+You can view more information about an event by clicking into it, where you can view the full details of an event's resource, related resources, and its payload.
+
+Reacting to events
+-----------------------------------------------------------
+
+From an event page, you can configure an [automation](https://docs.prefect.io/concepts/automations) to trigger on the observation of matching events or a lack of matching events by clicking the automate button in the overflow menu:
+
+![Automation from event](/images/events4.png)
\ No newline at end of file diff --git a/docs/2.19.x/cloud/incidents.mdx b/docs/2.19.x/cloud/incidents.mdx new file mode 100644 index 000000000000..02d767528d75 --- /dev/null +++ b/docs/2.19.x/cloud/incidents.mdx @@ -0,0 +1,106 @@ +--- +title: Incidents +--- + +Overview +--------------------------------------- + +Incidents are a Prefect Cloud feature to help your team manage workflow disruptions. Incidents help you identify, resolve, and document issues with mission-critical workflows. This system enhances operational efficiency by automating the incident management process and providing a centralized platform for collaboration and compliance. + +What are incidents? +------------------------------------------------------------ + +Incidents are formal declarations of disruptions to a workspace. With [automations](#incident-automations), activity in a workspace can be paused when an incident is created and resumed when it is resolved. + +Incidents vary in nature and severity, ranging from minor glitches to critical system failures. Prefect Cloud enables users to effectively and automatically track and manage these incidents, ensuring minimal impact on operational continuity. + +![Incidents in the Prefect Cloud UI](/images/incidents1.png) + +Why use incident management? +------------------------------------------------------------------------------ + +1. **Automated detection and reporting**: Incidents can be automatically identified based on specific triggers or manually reported by team members, facilitating prompt response. + +2. **Collaborative problem-solving**: The platform fosters collaboration, allowing team members to share insights, discuss resolutions, and track contributions. + +3. **Comprehensive impact assessment**: Users gain insights into the incident's influence on workflows, helping in prioritizing response efforts. + +4. **Compliance with incident management processes**: Detailed documentation and reporting features support compliance with incident management systems. + +5. **Enhanced operational transparency**: The system provides a transparent view of both ongoing and resolved incidents, promoting accountability and continuous improvement. + + +![An active incident in the Prefect Cloud UI](/images/incidents2.png) + +How to use incident management in Prefect Cloud +--------------------------------------------------------------------------------------------------------------------- + +### Creating an incident + +There are several ways to create an incident: + +1. **From the Incidents page:** + + * Click on the **+** button. + * Fill in required fields and attach any Prefect resources related to your incident. +2. **From a flow run, work pool, or block:** + + * Initiate an incident directly from a failed flow run, automatically linking it as a resource, by clicking on the menu button and selecting "Declare an incident". +3. **Via an [automation](https://docs.prefect.io/concepts/automations/):** + + * Set up incident creation as an automated response to selected triggers. + +### Incident automations + +Automations can be used for triggering an incident and for selecting actions to take when an incident is triggered. For example, a work pool status change could trigger the declaration of an incident, or a critical level incident could trigger a notification action. + +To automatically take action when an incident is declared, set up a custom trigger that listens for declaration events. 
+
+```json
+{
+    "match": {
+        "prefect.resource.id": "prefect-cloud.incident.*"
+    },
+    "expect": [
+        "prefect-cloud.incident.declared"
+    ],
+    "posture": "Reactive",
+    "threshold": 1,
+    "within": 0
+}
+
+```
+
+
+**Building custom triggers**
+
+
+To get started with incident automations, you only need to specify two fields in your trigger:
+
+* **match**: The resource emitting your event of interest. You can match on specific resource IDs, use wildcards to match on all resources of a given type, and even match on other resource attributes, like `prefect.resource.name`.
+
+* **expect**: The event type to listen for. For example, you could listen for any (or all) of the following event types:
+
+    * `prefect-cloud.incident.declared`
+    * `prefect-cloud.incident.resolved`
+    * `prefect-cloud.incident.updated.severity`
+
+See [Event Triggers](https://docs.prefect.io/concepts/automations/#custom-triggers) for more information on custom triggers, and check out your Event Feed to see the event types emitted by your incidents and other resources (i.e., events that you can react to).
+
+When an incident is declared, any actions you configure, such as pausing work pools or sending notifications, will execute immediately.
+
+### Managing an incident
+
+* **Monitor active incidents**: View real-time status, severity, and impact.
+* **Adjust incident details**: Update status, severity, and other relevant information.
+* **Collaborate**: Add comments and insights; these will display with user identifiers and timestamps.
+* **Impact assessment**: Evaluate how the incident affects ongoing and future workflows.
+
+### Resolving and documenting incidents
+
+* **Resolution**: Update the incident status to reflect resolution steps taken.
+* **Documentation**: Ensure all actions, comments, and changes are logged for future reference.
+
+### Incident reporting
+
+* Generate a detailed timeline of the incident: actions taken, updates to severity and resolution - suitable for compliance and retrospective analysis.
\ No newline at end of file
diff --git a/docs/2.19.x/cloud/overview.mdx b/docs/2.19.x/cloud/overview.mdx
new file mode 100644
index 000000000000..a625f0837276
--- /dev/null
+++ b/docs/2.19.x/cloud/overview.mdx
@@ -0,0 +1,153 @@
+---
+title: Welcome to Prefect Cloud
+sidebarTitle: Overview
+---
+
+Prefect Cloud is a hosted workflow application framework that provides all the capabilities of Prefect server plus additional features, such as:
+
+* automations, events, and webhooks so you can create event-driven workflows
+* workspaces, RBAC, SSO, audit logs, and related user management tools for collaboration
+* push work pools for running flows on serverless infrastructure without a worker
+* error summaries powered by Marvin AI to help you resolve errors faster
+
+
+**Getting Started with Prefect Cloud**
+
+Ready to jump right in and start running with Prefect Cloud? See the [Quickstart](https://docs.prefect.io/getting-started/quickstart/) and follow the instructions on the **Cloud** tabs to write and deploy your first Prefect Cloud-monitored flow run.
+
+![Viewing a workspace dashboard in the Prefect Cloud UI](/images/cloud-overview1.png)
+
+Prefect Cloud includes all the features in the open-source Prefect server plus the following:
+
+
+**Prefect Cloud features**
+
+* [User accounts](#user-accounts) — personal accounts for working in Prefect Cloud.
+* [Workspaces](https://docs.prefect.io/cloud/workspaces/) — isolated environments to organize your flows, deployments, and flow runs.
+* [Automations](https://docs.prefect.io/cloud/automations/) — configure triggers, actions, and notifications in response to real-time monitoring events.
+* [Email notifications](https://docs.prefect.io/cloud/automations/) — send email alerts from Prefect's server based on automation triggers.
+* [Service accounts](https://docs.prefect.io/cloud/users/service-accounts/) — configure API access for running workers or executing flow runs on remote infrastructure.
+* [Custom role-based access controls (RBAC)](https://docs.prefect.io/cloud/users/roles/) — assign users granular permissions to perform certain activities within an account or a workspace.
+* [Single Sign-on (SSO)](https://docs.prefect.io/cloud/users/sso/) — authentication using your identity provider.
+* [Audit Log](https://docs.prefect.io/cloud/users/audit-log/) — a record of user activities to monitor security and compliance.
+* Collaboration — invite other people to your account.
+* Error summaries — (enabled by Marvin AI) distill the error logs of `Failed` and `Crashed` flow runs into actionable information.
+* [Push work pools](https://docs.prefect.io/guides/deployment/push-work-pools/) — run flows on your serverless infrastructure without running a worker.
+
+
+User accounts
+-------------------------------------------------
+
+When you sign up for Prefect Cloud, an account and a user profile are automatically provisioned for you.
+
+Your profile is the place where you'll manage settings related to yourself as a user, including:
+
+* Profile, including profile handle and image
+* API keys
+* Preferences, including timezone and color mode
+
+As an account Admin, you will also have access to account settings from the Account Settings page, such as:
+
+* Members
+* Workspaces
+* Roles
+
+As an account Admin you can create a [workspace](#workspaces) and invite other individuals to your workspace.
+
+Upgrading from a Prefect Cloud Free tier plan to a Pro or Custom tier plan enables additional functionality for adding workspaces, managing teams, and running higher volume workloads.
+
+Workspace Admins for Pro tier plans have the ability to set [role-based access controls (RBAC)](#roles-and-custom-permissions), view [Audit Logs](#audit-log), and configure [service accounts](#service-accounts).
+
+Custom plans have [object-level access control lists](https://docs.prefect.io/cloud/users/object-access-control-lists/), [custom roles](https://docs.prefect.io/cloud/users/roles/), [teams](https://docs.prefect.io/cloud/users/teams/), and [single sign-on (SSO)](#single-sign-on-sso) with [Directory Sync/SCIM provisioning](https://docs.prefect.io/cloud/users/sso/#scim-provisioning).
+
+
+**Prefect Cloud plans for teams of every size**
+
+See the [Prefect Cloud plans](https://www.prefect.io/pricing/) for details on Pro and Custom account tiers.
+
+
+Workspaces
+-------------------------------------------
+
+A workspace is an isolated environment within Prefect Cloud for your flows, deployments, and block configuration. See the [Workspaces](https://docs.prefect.io/cloud/workspaces/) documentation for more information about configuring and using workspaces.
+
+Each workspace keeps track of its own:
+
+* [Flow runs](https://docs.prefect.io/concepts/flows/) and task runs executed in an environment that is [syncing with the workspace](https://docs.prefect.io/cloud/workspaces/)
+* [Flows](https://docs.prefect.io/concepts/flows/) associated with flow runs and deployments observed by the Prefect Cloud API
+* [Deployments](https://docs.prefect.io/concepts/deployments/)
+* [Work pools](https://docs.prefect.io/concepts/work-pools/)
+* [Blocks](https://docs.prefect.io/concepts/blocks/) and [storage](https://docs.prefect.io/concepts/storage/)
+* [Events](https://docs.prefect.io/cloud/events/)
+* [Automations](https://docs.prefect.io/concepts/automations/)
+* [Incidents](https://docs.prefect.io/cloud/incidents/)
+
+![Viewing a workspace dashboard in the Prefect Cloud UI.](/images/cloud-overview2.png)
+
+Events
+-----------------------------------
+
+Prefect Cloud allows you to see your [events](https://docs.prefect.io/cloud/events/). Events provide information about the state of your workflows, and can be used as [automation](https://docs.prefect.io/concepts/automations/) triggers.
+
+![Prefect UI](/images/cloud-overview3.png)
+
+Automations
+---------------------------------------------
+
+Prefect Cloud [automations](https://docs.prefect.io/concepts/automations/) provide additional notification capabilities beyond those in a self-hosted open-source Prefect server. Automations also enable you to create event-driven workflows, toggle resources such as schedules and work pools, and declare incidents.
+
+Incidents
+------------------------------------------
+
+Prefect Cloud's [incidents](https://docs.prefect.io/cloud/incidents/) help teams identify, rectify, and document issues in mission-critical workflows. Incidents are formal declarations of disruptions to a workspace. With [automations](https://docs.prefect.io/cloud/incidents/#incident-automations), activity in that workspace can be paused when an incident is created and resumed when it is resolved.
+
+Error summaries
+-----------------------------------------------------
+
+Prefect Cloud error summaries, enabled by Marvin AI, distill the error logs of `Failed` and `Crashed` flow runs into actionable information. To enable this feature and others powered by Marvin AI, visit the **Settings** page for your account.
+
+Service accounts
+--------------------------------------------------------
+
+Service accounts enable you to create Prefect Cloud API keys that are not associated with a user account. Service accounts are typically used to configure API access for running workers or executing flow runs on remote infrastructure. See the [service accounts](https://docs.prefect.io/cloud/users/service-accounts/) documentation for more information about creating and managing service accounts.
+
+Roles and custom permissions
+--------------------------------------------------------------------------------
+
+Role-based access controls (RBAC) enable you to assign users a role with permissions to perform certain activities within an account or a workspace. See the [role-based access controls (RBAC)](https://docs.prefect.io/cloud/users/roles/) documentation for more information about managing user roles in a Prefect Cloud account.
+
+Single Sign-on (SSO)
+--------------------------------------------------------------
+
+Prefect Cloud's [Pro and Custom plans](https://www.prefect.io/pricing) offer [single sign-on (SSO)](https://docs.prefect.io/cloud/users/sso/) authentication integration with your team’s identity provider. SSO integration can be set up with identity providers that support OIDC and SAML. Directory Sync and SCIM provisioning are also available with Custom plans.
+
+Audit log
+------------------------------------------
+
+Prefect Cloud's [Pro and Custom plans](https://www.prefect.io/pricing) offer [Audit Logs](https://docs.prefect.io/cloud/users/audit-log/) for compliance and security. Audit logs provide a chronological record of activities performed by users in an account.
+
+Prefect Cloud REST API
+-------------------------------------------------------------------
+
+The [Prefect REST API](https://docs.prefect.io/api-ref/rest-api/) is used for communicating data from Prefect clients to Prefect Cloud or a local Prefect server for orchestration and monitoring. This API is mainly consumed by Prefect clients like the Prefect Python Client or the Prefect UI.
+
+
+**Prefect Cloud REST API interactive documentation**
+
+
+Prefect Cloud REST API documentation is available at [https://app.prefect.cloud/api/docs](https://app.prefect.cloud/api/docs).
+
+
+
+Start using Prefect Cloud
+-------------------------------------------------------------------------
+
+To create an account or sign in with an existing Prefect Cloud account, go to [https://app.prefect.cloud/](https://app.prefect.cloud/).
+
+Then follow the steps in the UI to deploy your first Prefect Cloud-monitored flow run. For more details, see the [Prefect Quickstart](https://docs.prefect.io/getting-started/quickstart/) and follow the instructions on the **Cloud** tabs.
+
+
+**Need help?**
+
+Get your questions answered by a Prefect Product Advocate! [Book a Meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs)
+
\ No newline at end of file
diff --git a/docs/2.19.x/cloud/users/api-keys.mdx b/docs/2.19.x/cloud/users/api-keys.mdx
new file mode 100644
index 000000000000..7f2121d3fe81
--- /dev/null
+++ b/docs/2.19.x/cloud/users/api-keys.mdx
@@ -0,0 +1,48 @@
+---
+sidebarTitle: API Keys
+title: Manage Prefect Cloud API Keys
+---
+
+
+API keys enable you to authenticate a local environment to work with Prefect Cloud.
+
+If you run `prefect cloud login` from your CLI, you'll have the choice to authenticate through your browser or by pasting an API key.
+
+If you choose to authenticate through your browser, you'll be directed to an authorization page. After you grant approval to connect, you'll be redirected to the CLI and the API key will be saved to your local [Prefect profile](https://docs.prefect.io/guides/settings/).
+
+If you choose to authenticate by pasting an API key, you'll need to create an API key in the Prefect Cloud UI first.
+
+Create an API key
+---------------------------------------------------------
+
+To create an API key, select the account icon at the bottom-left corner of the UI.
+
+Select **API Keys**. The page displays a list of previously generated keys and lets you create new API keys or delete keys.
+
+![Viewing and editing API keys in the Cloud UI.](/images/api-keys1.png)
+
+Select the **+** button to create a new API key. Provide a name for the key and an expiration date.
+

![Creating an API key in the Cloud UI.](/images/api-keys2.png)

**Warning**

Note that an API key cannot be revealed again in the UI after it is generated, so copy the key to a secure location.

Log into Prefect Cloud with an API Key
---------------------------------------------------------------------------------------------------

```
prefect cloud login -k '<api-key>'

```


Service account API keys
------------------------------------------------------------------------

Service accounts are a feature of Prefect Cloud [Pro and Custom tier plans](https://www.prefect.io/pricing) that enable you to create a Prefect Cloud API key that is not associated with a user account.

Service accounts are typically used to configure API access for running workers or executing flow runs on remote infrastructure. Events and logs for flow runs in those environments are then associated with the service account rather than a user, and API access may be managed or revoked by configuring or removing the service account without disrupting user access.

See the [service accounts](https://docs.prefect.io/cloud/users/service-accounts/) documentation for more information about creating and managing service accounts in Prefect Cloud. \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/audit-log.mdx b/docs/2.19.x/cloud/users/audit-log.mdx new file mode 100644 index 000000000000..221d0a5dd08f --- /dev/null +++ b/docs/2.19.x/cloud/users/audit-log.mdx @@ -0,0 +1,42 @@ +--- +title: Audit Log +--- +
Prefect Cloud's [Pro and Custom plans](https://www.prefect.io/pricing) offer enhanced compliance and transparency tools with Audit Log. Audit logs provide a chronological record of activities performed by members in your account, allowing you to monitor detailed Prefect Cloud actions for security and compliance purposes.

Audit logs enable you to identify who took what action, when, and using what resources within your Prefect Cloud account. In conjunction with appropriate tools and procedures, audit logs can assist in detecting potential security violations and investigating application errors.

Audit logs can be used to identify changes in:

* Access to workspaces
* User login activity
* User API key creation and removal
* Workspace creation and removal
* Account member invitations and removal
* Service account creation, API key rotation, and removal
* Billing payment method for self-serve pricing tiers

See the [Prefect Cloud plan information](https://www.prefect.io/pricing) to learn more about options for supporting audit logs.

Viewing audit logs
-----------------------------------------------------------

From your Pro or Custom account settings page, select the **Audit Log** page to view audit logs.

![Viewing audit logs for an account in the Prefect Cloud UI.](/images/audit-log1.png)

Pro and Custom account tier admins can view audit logs for:

* Account-level events in Prefect Cloud, such as:
    * Member invites
    * Changing a member’s role
    * Member login and logout of Prefect Cloud
    * Creating or deleting a service account
* Workspace-level events in Prefect Cloud, such as:
    * Adding a member to a workspace
    * Changing a member’s workspace role
    * Creating or deleting a workspace

Admins can filter audit logs on multiple dimensions to restrict the results they see by workspace, user, or event type. Available audit log events are displayed in the **Events** drop-down menu.

Audit logs may also be filtered by date range.
Audit log retention period varies by [Prefect Cloud plan](https://www.prefect.io/pricing). \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/objective-level-access-control.mdx b/docs/2.19.x/cloud/users/objective-level-access-control.mdx new file mode 100644 index 000000000000..f8b8820108d9 --- /dev/null +++ b/docs/2.19.x/cloud/users/objective-level-access-control.mdx @@ -0,0 +1,17 @@ +--- +title: Object-Level Access Control Lists +--- +
Prefect Cloud's [Custom plan](https://www.prefect.io/pricing) offers object-level access control lists to restrict access to specific users and service accounts within a workspace. ACLs are supported for blocks and deployments.

Organization Admins and Workspace Owners can configure access control lists by navigating to an object and clicking **manage access**. When an ACL is added, all users and service accounts with access to an object via their workspace role will lose access if not explicitly added to the ACL.

![Viewing ACL for a deployment in the Prefect Cloud UI.](/images/olacl.png)


**ACLs and visibility**


Objects not governed by access control lists, such as flow runs, flows, and artifacts, will be visible to a user within a workspace even if an associated block or deployment has been restricted for that user.

See the [Prefect Cloud plans](https://www.prefect.io/pricing) to learn more about options for supporting object-level access control. \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/overview.mdx b/docs/2.19.x/cloud/users/overview.mdx new file mode 100644 index 000000000000..0c97701400b4 --- /dev/null +++ b/docs/2.19.x/cloud/users/overview.mdx @@ -0,0 +1,55 @@ +--- +title: User accounts +sidebarTitle: Overview +--- +
Sign up for a Prefect Cloud account at [app.prefect.cloud](https://app.prefect.cloud/).

An individual user can be invited to become a member of other accounts.

User settings
-------------------------------------------------

Users can access their personal settings in the [profile menu](https://app.prefect.cloud/my/profile), including:

* Profile: View and edit basic information, such as name.
* API keys: Create and view [API keys](https://docs.prefect.io/cloud/users/api-keys/) for connecting to Prefect Cloud from the CLI or other environments.
* Preferences: Manage settings, such as color mode and default time zone.
* Feature previews: Enable or disable feature previews.

Account roles
-------------------------------------------------

Users who are part of an account can hold the role of Admin or Member. Admins can invite other users to join the account and manage the account's workspaces and teams.

Admins on Pro and Custom tier Prefect Cloud accounts can grant members of the account [roles](https://docs.prefect.io/cloud/users/roles/) in a workspace, such as Runner or Viewer. Custom roles are available on Custom tier accounts.

API keys
---------------------------------------

[API keys](https://docs.prefect.io/cloud/users/api-keys/) enable you to authenticate an environment to work with Prefect Cloud.

Service accounts
--------------------------------------------------------

[Service accounts](https://docs.prefect.io/cloud/users/service-accounts/) enable you to create a Prefect Cloud API key that is not associated with a user account.
+

Single sign-on (SSO)
--------------------------------------------------------------

Custom tier plans offer [single sign-on (SSO)](https://docs.prefect.io/cloud/users/sso/) integration with your team’s identity provider, including options for [directory sync and SCIM provisioning](https://docs.prefect.io/cloud/users/sso/#directory-sync).

Audit log
------------------------------------------

[Audit logs](https://docs.prefect.io/cloud/users/audit-log/) provide a chronological record of activities performed by Prefect Cloud users who are members of an account.

Object-level access control lists (ACLs)
------------------------------------------------------------------------------------------------------

Prefect Cloud's Custom plan offers object-level access control lists to restrict access to specific users and service accounts within a workspace.

Teams
----------------------------------

Users of Custom tier Prefect Cloud accounts can be added to [Teams](https://docs.prefect.io/cloud/users/teams/) to simplify access control governance. \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/rbac.mdx b/docs/2.19.x/cloud/users/rbac.mdx new file mode 100644 index 000000000000..ba7f22c614e2 --- /dev/null +++ b/docs/2.19.x/cloud/users/rbac.mdx @@ -0,0 +1,182 @@ +--- +title: Roles (RBAC) +sidebarTitle: Roles (RBAC) +--- +
User and Service Account Roles
------------------------------------------------------------------------------------

Prefect Cloud's [Pro and Custom tiers](https://www.prefect.io/pricing) allow you to set team member access to the appropriate level within specific workspaces.

Role-based access controls (RBAC) enable you to assign users granular permissions to perform certain activities.

To give users access to functionality beyond the scope of Prefect’s built-in workspace roles, Custom account Admins can create custom roles for users.

Built-in roles
---------------------------------------------------

Roles give users abilities at either the account level or at the individual workspace level.

* An _account-level role_ defines a user's default permissions within an account.
* A _workspace-level role_ defines a user's permissions within a specific workspace.

The following sections outline the abilities of the built-in, Prefect-defined account and workspace roles.

### Account-level roles

The following built-in roles have permissions across an account in Prefect Cloud.



* Role: Owner
  * Abilities: • Set/change all account profile settings allowed to be set/changed by a Prefect user. • Add and remove account members, and their account roles. • Create and delete service accounts in the account. • Create workspaces in the account. • Implicit workspace owner access on all workspaces in the account. • Bypass SSO.
* Role: Admin
  * Abilities: • Set/change all account profile settings allowed to be set/changed by a Prefect user. • Add and remove account members, and their account roles. • Create and delete service accounts in the account. • Create workspaces in the account. • Implicit workspace owner access on all workspaces in the account. • Cannot bypass SSO.
* Role: Member
  * Abilities: • View account profile settings. • View workspaces I have access to in the account. • View account members and their roles. • View service accounts in the account.


### Workspace-level roles

The following built-in roles have permissions within a given workspace in Prefect Cloud.
+



* Role: Viewer
  * Abilities: • View flow runs within a workspace. • View deployments within a workspace. • View all work pools within a workspace. • View all blocks within a workspace. • View all automations within a workspace. • View workspace handle and description.
* Role: Runner
  * Abilities: All Viewer abilities, plus: • Run deployments within a workspace.
* Role: Developer
  * Abilities: All Runner abilities, plus: • Run flows within a workspace. • Delete flow runs within a workspace. • Create, edit, and delete deployments within a workspace. • Create, edit, and delete work pools within a workspace. • Create, edit, and delete all blocks and their secrets within a workspace. • Create, edit, and delete automations within a workspace. • View all workspace settings.
* Role: Owner
  * Abilities: All Developer abilities, plus: • Add and remove account members, and set their role within a workspace. • Set the workspace’s default workspace role for all users in the account. • Set, view, edit workspace settings.
* Role: Worker
  * Abilities: The minimum scopes required for a worker to poll for and submit work.


Custom workspace roles
-------------------------------------------------------------------

The built-in roles will serve the needs of most users, but your team may need to configure custom roles, giving users access to specific permissions within a workspace.

Custom roles can inherit permissions from a built-in role. This enables tweaks to the role to meet your team’s needs, while ensuring users can still benefit from Prefect’s default workspace role permission curation as new functionality becomes available.

Custom workspace roles can also be created independent of Prefect’s built-in roles. This option gives workspace admins full control of user access to workspace functionality. However, for non-inherited custom roles, the workspace admin takes on the responsibility for monitoring and setting permissions for new functionality as it is released.

See [Role permissions](#workspace-role-permissions) for details of permissions you may set for custom roles.

After you create a new role, it becomes available in the account **Members** page and the **Workspace Sharing** page for you to apply to users.

### Inherited roles

A custom role may be configured as an **Inherited Role**. Using an inherited role allows you to create a custom role using a set of initial permissions associated with a built-in Prefect role. Additional permissions can be added to the custom role. Permissions included in the inherited role cannot be removed.

Custom roles created using an inherited role will follow Prefect's default workspace role permission curation as new functionality becomes available.

To configure a custom role as an inherited role, select the **Inherit permission from a default role** check box, then select the role from which the new role should inherit permissions.

![Creating a custom role for a workspace using inherited permissions in Prefect Cloud](https://docs.prefect.io/img/ui/org-inherited-role.png)

Workspace role permissions
---------------------------------------------------------------------------

The following permissions are available for custom roles.

### Automations



* Permission: View automations
  * Description: User can see configured automations within a workspace.
* Permission: Create, edit, and delete automations
  * Description: User can create, edit, and delete automations within a workspace.
Includes permissions of View automations. + + +### Blocks + + + +* Permission: View blocks + * Description: User can see configured blocks within a workspace. +* Permission: View secret block data + * Description: User can see configured blocks and their secrets within a workspace. Includes permissions of View blocks. +* Permission: Create, edit, and delete blocks + * Description: User can create, edit, and delete blocks within a workspace. Includes permissions of View blocks and View secret block data. + + +### Deployments + + + +* Permission: View deployments + * Description: User can see configured deployments within a workspace. +* Permission: Run deployments + * Description: User can run deployments within a workspace. This does not give a user permission to execute the flow associated with the deployment. This only gives a user (via their key) the ability to run a deployment — another user/key must actually execute that flow, such as a service account with an appropriate role. Includes permissions of View deployments. +* Permission: Create and edit deployments + * Description: User can create and edit deployments within a workspace. Includes permissions of View deployments and Run deployments. +* Permission: Delete deployments + * Description: User can delete deployments within a workspace. Includes permissions of View deployments, Run deployments, and Create and edit deployments. + + +### Flows + + + +* Permission: View flows and flow runs + * Description: User can see flows and flow runs within a workspace. +* Permission: Create, update, and delete saved search filters + * Description: User can create, update, and delete saved flow run search filters configured within a workspace. Includes permissions of View flows and flow runs. +* Permission: Create, update, and run flows + * Description: User can create, update, and run flows within a workspace. Includes permissions of View flows and flow runs. +* Permission: Delete flows + * Description: User can delete flows within a workspace. Includes permissions of View flows and flow runs and Create, update, and run flows. + + +### Notifications + + + +* Permission: View notification policies + * Description: User can see notification policies configured within a workspace. +* Permission: Create and edit notification policies + * Description: User can create and edit notification policies configured within a workspace. Includes permissions of View notification policies. +* Permission: Delete notification policies + * Description: User can delete notification policies configured within a workspace. Includes permissions of View notification policies and Create and edit notification policies. + + +### Task run concurrency + + + +* Permission: View concurrency limits + * Description: User can see configured task run concurrency limits within a workspace. +* Permission: Create, edit, and delete concurrency limits + * Description: User can create, edit, and delete task run concurrency limits within a workspace. Includes permissions of View concurrency limits. + + +### Work pools + + + +* Permission: View work pools + * Description: User can see work pools configured within a workspace. +* Permission: Create, edit, and pause work pools + * Description: User can create, edit, and pause work pools configured within a workspace. Includes permissions of View work pools. +* Permission: Delete work pools + * Description: User can delete work pools configured within a workspace. 
Includes permissions of View work pools and Create, edit, and pause work pools.


### Workspace management



* Permission: View information about workspace service accounts
  * Description: User can see service accounts configured within a workspace.
* Permission: View information about workspace users
  * Description: User can see user accounts for users invited to the workspace.
* Permission: View workspace settings
  * Description: User can see settings configured within a workspace.
* Permission: Edit workspace settings
  * Description: User can edit settings for a workspace. Includes permissions of View workspace settings.
* Permission: Delete the workspace
  * Description: User can delete a workspace. Includes permissions of View workspace settings and Edit workspace settings.
diff --git a/docs/2.19.x/cloud/users/service-account.mdx b/docs/2.19.x/cloud/users/service-account.mdx new file mode 100644 index 000000000000..f4147335acdb --- /dev/null +++ b/docs/2.19.x/cloud/users/service-account.mdx @@ -0,0 +1,52 @@ +--- +title: Service Accounts +--- +
Service accounts enable you to create a Prefect Cloud API key that is not associated with a user account. Service accounts are typically used to configure API access for running workers or executing deployment flow runs on remote infrastructure.

Service accounts are non-user accounts that have the following features:

* Prefect Cloud [API keys](https://docs.prefect.io/cloud/users/api-keys/)
* [Roles](https://docs.prefect.io/cloud/users/roles/) and permissions

Using service account credentials, you can [configure an execution environment](https://docs.prefect.io/cloud/connecting/#configure-a-local-execution-environment) to interact with your Prefect Cloud workspaces without a user having to manually log in from that environment. Service accounts may be created, added to workspaces, have their roles changed, or deleted without affecting other user accounts.

Select **Service Accounts** to view, create, or edit service accounts.

![Viewing service accounts in Prefect Cloud.](/images/service-accounts1.png)

Service accounts are created at the account level, but individual workspaces may be shared with the service account. See [workspace sharing](https://docs.prefect.io/cloud/workspaces/#workspace-sharing) for more information.

**Service account credentials**

When you create a service account, Prefect Cloud creates a new API key for the account and provides the API configuration command for the execution environment. Save these to a safe location for future use. If the access credentials are lost or compromised, you should regenerate the credentials from the service account page.

**Service account roles**

Service accounts are created at the account level, and can then be added to workspaces within the account. You may apply any valid _workspace-level_ role to a service account.

Create a service account
-----------------------------------------------------------------------

Within your account, on the **Service Accounts** page, select the **+** icon to create a new service account. You'll be prompted to configure:

* The service account name. This name must be unique within your account.
* An expiration date, or the **Never Expire** option.


**Service account roles**


A service account may only be a Member of an account. You may apply any valid _workspace-level_ role to a service account when it is [added to a workspace](https://docs.prefect.io/cloud/workspaces/#workspace-sharing).
+


Select **Create** to create the new service account.


**Warning**

Note that an API key cannot be revealed again in the UI after it is generated, so copy the key to a secure location.

You can change the API key and expiration for a service account by rotating the API key. Select **Rotate API Key** from the menu on the left side of the service account's information on this page. Optionally, you can set a period of time for your old service account key to remain active.

To delete a service account, select **Remove** from the menu on the left side of the service account's information. \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/sso.mdx b/docs/2.19.x/cloud/users/sso.mdx new file mode 100644 index 000000000000..11c7b3bb39c6 --- /dev/null +++ b/docs/2.19.x/cloud/users/sso.mdx @@ -0,0 +1,49 @@ +--- +title: Single Sign-On (SSO) +--- +
Prefect Cloud's [Custom plans](https://www.prefect.io/pricing) offer single sign-on (SSO) integration with your team’s identity provider. SSO integration can be set up with any identity provider that supports:

* OIDC
* SAML 2.0

When using SSO, Prefect Cloud won't store passwords for any accounts managed by your identity provider. Members of your Prefect Cloud account will instead log in and authenticate using your identity provider.

Once your SSO integration has been set up, non-admins will be required to authenticate through the SSO provider when accessing account resources.

See the [Prefect Cloud plans](https://www.prefect.io/pricing) to learn more about options for supporting more users and workspaces, service accounts, and SSO.

Configuring SSO
-----------------------------------------------------

Within your account, select the **SSO** page to enable SSO for users.

If you haven't enabled SSO for a domain yet, enter the email domains for which you want to configure SSO in Prefect Cloud and save it.

Under **Enabled Domains**, select the domains from the **Domains** list, then select **Generate Link**. This step creates a link you can use to configure SSO with your identity provider.

![Generating a configuration link for single sign-on in the Prefect Cloud UI.](/images/sso1.png)

Using the provided link, navigate to the Identity Provider Configuration dashboard and select your identity provider to continue configuration. If your provider isn't listed, you can continue with the `SAML` or `Open ID Connect` choices instead.

![Opening the Identity Provider Configuration dashboard.](/images/sso2.png)

Once you complete SSO configuration, your users will be required to authenticate via your identity provider when accessing account resources, giving you full control over application access.

Directory sync
---------------------------------------------------

**Directory sync** automatically provisions and de-provisions users for your account.

Provisioned users are given basic “Member” roles and will have access to any resources that role entails.

When a user is unassigned from the Prefect Cloud application in your identity provider, they will automatically lose access to Prefect Cloud resources, allowing your IT team to control access to Prefect Cloud without ever signing into the Prefect UI.

SCIM Provisioning
---------------------------------------------------------

Custom accounts have access to SCIM for user provisioning.
The SSO tab provides access to enable SCIM provisioning.

![Cloud UI with SSO & SCIM provisioned](/images/sso3.png) \ No newline at end of file diff --git a/docs/2.19.x/cloud/users/teams.mdx b/docs/2.19.x/cloud/users/teams.mdx new file mode 100644 index 000000000000..e37c0f9ac1a1 --- /dev/null +++ b/docs/2.19.x/cloud/users/teams.mdx @@ -0,0 +1,13 @@ +--- +title: Teams +--- +
Prefect Cloud's [Custom plan](https://www.prefect.io/pricing) offers team management to simplify access control governance.

Account Admins can configure teams and team membership from the account settings menu by clicking **Teams**. Teams are composed of users and service accounts. Teams can be added to workspaces or object access control lists just like users and service accounts.

![Viewing a team in the Prefect Cloud UI.](/images/teams.png)

If SCIM is enabled on your account, the set of teams and the users within them is governed by your IDP. Prefect Cloud service accounts, which are not governed by your IDP, can still be added to your existing set of teams.

See the [Prefect Cloud plans](https://www.prefect.io/pricing) to learn more about options for supporting teams. \ No newline at end of file diff --git a/docs/2.19.x/cloud/workspaces.mdx b/docs/2.19.x/cloud/workspaces.mdx new file mode 100644 index 000000000000..b0b2d5ffb602 --- /dev/null +++ b/docs/2.19.x/cloud/workspaces.mdx @@ -0,0 +1,124 @@ +--- +title: Workspaces +--- +
A workspace is a discrete environment within Prefect Cloud for your workflows and blocks. Workspaces are available to Prefect Cloud accounts only.

Workspaces can be used to organize and compartmentalize your workflows. For example, you can use separate workspaces to isolate dev, staging, and prod environments, or to provide separation between different teams.

When you first log into Prefect Cloud, you will be prompted to create your own initial workspace. After creating your workspace, you'll be able to view flow runs, flows, deployments, and other workspace-specific features in the Prefect Cloud UI.

![Viewing a workspace dashboard in the Prefect Cloud UI.](/images/workspaces1.png)

Select a workspace name in the navigation menu to see all workspaces you can access.

![Viewing all available workspaces in the Prefect Cloud UI.](/images/workspaces2.png)

Your list of available workspaces may include:

* Your own account's workspace.
* Workspaces in an account to which you've been invited and have been given access as an Admin or Member.


**Workspace-specific features**


Each workspace keeps track of its own:

* [Flow runs](https://docs.prefect.io/ui/flow-runs/) and task runs executed in an environment that is [syncing with the workspace](https://docs.prefect.io/ui/cloud/#workspaces)
* [Flows](https://docs.prefect.io/concepts/flows/) associated with flow runs or deployments observed by the Prefect Cloud API
* [Deployments](https://docs.prefect.io/concepts/deployments/)
* [Work pools](https://docs.prefect.io/concepts/work-pools/)
* [Blocks](https://docs.prefect.io/ui/blocks/) and [storage](https://docs.prefect.io/concepts/storage/)
* [Automations](https://docs.prefect.io/concepts/automations/)



Your user permissions within workspaces may vary. Account admins can assign roles and permissions at the workspace level.

Create a workspace
-----------------------------------------------------------

On the Account Workspaces dropdown or the **Workspaces** page, select the **+** icon to create a new workspace.
+

You'll be prompted to configure:

* The **Workspace Owner** from the dropdown account menu options.
* The **Workspace Name**, which must be unique within the account.
* An optional description for the workspace.

![Creating a new workspace in the Prefect Cloud UI.](/images/workspaces3.png)

Select **Create** to create the new workspace. The number of available workspaces varies by [Prefect Cloud plan](https://www.prefect.io/pricing/). See [Pricing](https://www.prefect.io/pricing/) if you need additional workspaces or users.

Workspace settings
-----------------------------------------------------------

Within a workspace, select **Settings -> General** to view or edit workspace details.

![Managing a workspace in the Prefect Cloud UI.](/images/workspaces4.png)

On this page you can edit workspace details or delete the workspace.


**Deleting a workspace**


Deleting a workspace deletes all deployments, flow run history, work pools, and notifications configured in the workspace.

Workspace access
--------------------------------------------------------

Within a Prefect Cloud Pro or Custom tier account, Workspace Owners can invite other people to be members and provision [service accounts](https://docs.prefect.io/ui/service-accounts/) to a workspace. In addition to giving the user access to the workspace, a Workspace Owner assigns a [workspace role](https://docs.prefect.io/ui/roles/) to the user. The role specifies the scope of permissions for the user within the workspace.

As a Workspace Owner, select **Workspaces -> Sharing** to manage members and service accounts for the workspace.

If you've previously invited individuals to your account or provisioned service accounts, you'll see them listed here.

![Managing sharing in a workspace in the Prefect Cloud UI.](/images/workspaces5.png)

To invite someone to an account, select the Members **+** icon. You can select from a list of existing account members.

Select a Role for the user. This will be the initial role for the user within the workspace. A workspace Owner can change this role at any time.

Select **Send** to initiate the invitation.

To add a service account to a workspace, select the Service Accounts **+** icon. You can select from a list of configured service accounts. Select a Workspace Role for the service account. This will be the initial role for the service account within the workspace. A workspace Owner can change this role at any time. Select **Share** to finalize adding the service account.

To remove a workspace member or service account, select **Remove** from the menu on the right side of the user or service account information on this page.

Workspace transfer
-----------------------------------------------------------

Workspace transfer enables you to move an existing workspace from one account to another.

Workspace transfer retains existing workspace configuration and flow run history, including blocks, deployments, notifications, work pools, and logs.


**Workspace transfer permissions**

Workspace transfer must be initiated or approved by a user with admin privileges for the workspace to be transferred.

To initiate a workspace transfer between personal accounts, contact [support@prefect.io](mailto:support@prefect.io).


### Transfer a workspace

To transfer a workspace, select **Settings** -> **General** within the workspace. Then, from the three dot menu in the upper right of the page, select **Transfer**.
+

![Initiating a workspace transfer in the Prefect Cloud UI.](/images/workspaces6.png)

The **Transfer Workspace** page shows the workspace to be transferred on the left. Select the target account for the workspace on the right.


**Workspace transfer impact on accounts**


Workspace transfer may impact resource usage and costs for source and target accounts.

When you transfer a workspace, users, API keys, and service accounts may lose access to the workspace. The audit log will no longer track activity on the workspace. Flow runs ending outside of the destination account’s flow run retention period will be removed. You may also need to update Prefect CLI profiles and execution environment settings to access the workspace's new location.

You may also incur new charges in the target account to accommodate the transferred workspace.

The **Transfer Workspace** page outlines the impacts of transferring the selected workspace to the selected target. Please review these notes carefully before selecting **Transfer** to transfer the workspace.
 \ No newline at end of file diff --git a/docs/2.19.x/community/contributing.mdx b/docs/2.19.x/community/contributing.mdx new file mode 100644 index 000000000000..5b4d71e200ab --- /dev/null +++ b/docs/2.19.x/community/contributing.mdx @@ -0,0 +1,256 @@ +--- +title: Contributing +--- +
Thanks for considering contributing to Prefect!

Setting up a development environment
-----------------------------------------------------------------------------------------------

First, download the source code and install an editable version of the Python package:

```
# Clone the repository
git clone https://github.com/PrefectHQ/prefect.git
cd prefect

# We recommend using a virtual environment
python -m venv .venv
source .venv/bin/activate

# Install the package with development dependencies
pip install -e ".[dev]"

# Set up pre-commit hooks for required formatting
pre-commit install

```


If you don't want to install the pre-commit hooks, you can manually install the formatting dependencies with:

```
pip install $(./scripts/precommit-versions.py)

```


You'll need to run `black` and `ruff` before a contribution can be accepted.

After installation, you can run the test suite with `pytest`:

```
# Run all the tests
pytest tests

# Run a subset of tests
pytest tests/test_flows.py

```


**Building the Prefect UI**

If you intend to run a local Prefect server during development, you must first build the UI. See [UI development](#ui-development) for instructions.


Prefect Code of Conduct
---------------------------------------------------------------------

### Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
+

### Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

### Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

### Scope

This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

### Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting Chris White at [chris@prefect.io](mailto:chris@prefect.io). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

### Attribution

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct.html](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html)

For answers to common questions about this code of conduct, see [https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq)

## Developer Tooling

The Prefect CLI provides several helpful commands to aid development.

Start all services with hot-reloading on code changes (requires UI dependencies to be installed):

```
prefect dev start
```

Start a Prefect API that reloads on code changes:

```
prefect dev api
```

Start a Prefect agent that reloads on code changes:

```
prefect dev agent
```

### UI development

Developing the Prefect UI requires that [npm](https://github.com/npm/cli) is installed.
+

Start a development UI that reloads on code changes:

```
prefect dev ui
```

Build the static UI (the UI served by `prefect server start`):

```
prefect dev build-ui
```

### Docs Development

Prefect uses [mkdocs](https://www.mkdocs.org/) for the docs website and the [mkdocs-material](https://squidfunk.github.io/mkdocs-material/) theme. While we use `mkdocs-material-insiders` for production, builds can still happen without the extra plugins. Deploy previews are available on pull requests, so you'll be able to browse the final look of your changes before merging.

To build the docs:
```
mkdocs build
```

To serve the docs locally at [http://127.0.0.1:8000/](http://127.0.0.1:8000/):
```
mkdocs serve
```

For additional mkdocs help and options:
```
mkdocs --help
```

We use the [mkdocs-material](https://squidfunk.github.io/mkdocs-material/) theme. To add additional JavaScript or CSS to the docs, please see the theme documentation [here](https://squidfunk.github.io/mkdocs-material/customization/).

Internal developers can install the production theme by running:

```
pip install -e git+https://github.com/PrefectHQ/mkdocs-material-insiders.git#egg=mkdocs-material
mkdocs build # or mkdocs build --config-file mkdocs.insiders.yml if needed

```


### Kubernetes development

Generate a manifest to deploy a development API to a local Kubernetes cluster:

```
prefect dev kubernetes-manifest

```


To access the Prefect UI running in a Kubernetes cluster, use the `kubectl port-forward` command to forward a port on your local machine to an open port within the cluster. For example:

```
kubectl port-forward deployment/prefect-dev 4200:4200

```


This forwards port 4200 on the default internal loop IP for localhost to the Prefect server deployment.

To tell the local `prefect` command how to communicate with the Prefect API running in Kubernetes, set the `PREFECT_API_URL` environment variable:

```
export PREFECT_API_URL=http://localhost:4200/api

```


Since you previously configured port forwarding for the localhost port to the Kubernetes environment, you’ll be able to interact with the Prefect API running in Kubernetes when using local Prefect CLI commands.

### Adding database migrations

To make changes to a table, first update the SQLAlchemy model in `src/prefect/server/database/orm_models.py`. For example, if you wanted to add a new column to the `flow_run` table, you would add a new column to the `FlowRun` model:

```python
# src/prefect/server/database/orm_models.py
from sqlalchemy import Column, String  # already imported at the top of the module
from sqlalchemy.orm import declarative_mixin

@declarative_mixin
class ORMFlowRun(ORMRun):
    """SQLAlchemy model of a flow run."""
    ...
    new_column = Column(String, nullable=True)  # <-- add this line

```


Next, you will need to generate new migration files. You must generate a new migration file for each database type. Migrations will be generated for whatever database type `PREFECT_API_DATABASE_CONNECTION_URL` is set to. See [here](https://docs.prefect.io/concepts/database/#configuring-the-database) for how to set the database connection URL for each database type.

To generate a new migration file, run the following command:

```
prefect server database revision --autogenerate -m "<migration name>"

```


Try to make your migration name brief but descriptive. For example:

* `add_flow_run_new_column`
* `add_flow_run_new_column_idx`
* `rename_flow_run_old_column_to_new_column`

The `--autogenerate` flag will automatically generate a migration file based on the changes to the models.
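As a rough illustration, the generated file is a standard Alembic script along the lines of the sketch below, continuing the hypothetical `new_column` example above. The revision identifiers are placeholders, and the real contents will depend on your models and database type.

```python
# A sketch of an autogenerated migration for the hypothetical `new_column`
# above; revision identifiers are placeholders, not real values.
import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision = "abc123456789"       # placeholder
down_revision = "def987654321"  # placeholder

def upgrade():
    # Add the new nullable column to the flow_run table.
    op.add_column("flow_run", sa.Column("new_column", sa.String(), nullable=True))

def downgrade():
    # Remove the column when downgrading.
    op.drop_column("flow_run", "new_column")
```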
+


**Always inspect the output of `--autogenerate`**


`--autogenerate` will generate a migration file based on the changes to the models. However, it is not perfect. Be sure to check the file to make sure it only includes the changes you want to make. Additionally, you may need to remove extra statements that were included and not related to your change.

The new migration can be found in the `src/prefect/server/database/migrations/versions/` directory. Each database type has its own subdirectory. For example, the SQLite migrations are stored in `src/prefect/server/database/migrations/versions/sqlite/`.

After you have inspected the migration file, you can apply the migration to your database by running the following command:

```
prefect server database upgrade -y

```


Once you have successfully created and applied migrations for all database types, make sure to update `MIGRATION-NOTES.md` to document your additions. \ No newline at end of file diff --git a/docs/2.19.x/community/style.mdx b/docs/2.19.x/community/style.mdx new file mode 100644 index 000000000000..d24e4a1a6e04 --- /dev/null +++ b/docs/2.19.x/community/style.mdx @@ -0,0 +1,248 @@ +--- +sidebarTitle: Style +title: Code style and practices +--- +
Generally, we follow the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). This document covers sections where we differ or where additional clarification is necessary.

Imports
-------------------------------------

A brief collection of rules and guidelines for how imports should be handled in this repository.

### Imports in `__init__` files

Leave `__init__` files empty unless exposing an interface. If you must expose objects to present a simpler API, please follow these rules.

#### Exposing objects from submodules

If importing objects from submodules, the `__init__` file should use a relative import. This is [required for type checkers](https://github.com/microsoft/pyright/blob/main/docs/typed-libraries.md#library-interface) to understand the exposed interface.

```python
# Correct
from .flows import flow

```


```python
# Wrong
from prefect.flows import flow

```


#### Exposing submodules

Generally, submodules should _not_ be imported in the `__init__` file. Submodules should only be exposed when the module is designed to be imported and used as a namespaced object.

For example, we do this for our schema and model modules because it is important to know if you are working with an API schema or database model, both of which may have similar names.

```python
import prefect.server.schemas as schemas

# The full module is accessible now
schemas.core.FlowRun

```


If exposing a submodule, use a relative import as you would when exposing an object.

```python
# Correct
from . import flows

```


```python
# Wrong
import prefect.flows

```


#### Importing to run side-effects

Another use case for importing submodules is to perform global side effects that occur when they are imported.

Often, global side-effects on import are a dangerous pattern. Avoid them if feasible.

We have a couple of acceptable use cases for this currently:

* To register dispatchable types, e.g. `prefect.serializers`.
* To extend a CLI application e.g. `prefect.cli`.

### Imports in modules

#### Importing other modules

The `from` syntax should be reserved for importing objects from modules. Modules should not be imported using the `from` syntax.
+

```python
# Correct
import prefect.server.schemas # use with the full name
import prefect.server.schemas as schemas # use the shorter name

```


```python
# Wrong
from prefect.server import schemas

```


Unless in an `__init__.py` file, relative imports should not be used.

```python
# Correct
from prefect.utilities.foo import bar

```


```python
# Wrong
from .utilities.foo import bar

```


Imports dependent on file location should never be used without explicit indication it is relative. This avoids confusion about the source of a module.

```python
# Correct
from . import test

```


#### Resolving circular dependencies

Sometimes, we must defer an import and perform it _within_ a function to avoid a circular dependency.

```python
## This function in `settings.py` requires a method from the global `context` but the context
## uses settings
def from_context():
    from prefect.context import get_profile_context

    ...

```


Attempt to avoid circular dependencies. This often reveals overentanglement in the design.

When performing deferred imports, they should all be placed at the top of the function.

##### With type annotations

If you are just using the imported object for a type signature, you should use the `TYPE_CHECKING` flag.

```python
# Correct
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from prefect.server.schemas.states import State

def foo(state: "State"):
    pass

```


Note that usage of the type within the module will need quotes e.g. `"State"` since it is not available at runtime.

#### Importing optional requirements

We do not have a best practice for this yet. See the `kubernetes`, `docker`, and `distributed` implementations for now.

#### Delaying expensive imports

Sometimes, imports are slow. We'd like to keep the `prefect` module import times fast. In these cases, we can lazily import the slow module by deferring import to the relevant function body. For modules that are consumed by many functions, the pattern used for optional requirements may be used instead.

Command line interface (CLI) output messages
-------------------------------------------------------------------------------------------------------------

Upon executing a command that creates an object, the output message should offer:

* A short description of what the command just did.
* A bullet point list, rehashing user inputs, if possible.
* Next steps, like the next command to run, if applicable.
* Other relevant, pre-formatted commands that can be copied and pasted, if applicable.
* A new line before the first line and after the last line.

Output Example:

```
$ prefect work-queue create testing

Created work queue with properties:
    name - 'abcde'
    uuid - 940f9828-c820-4148-9526-ea8107082bda
    tags - None
    deployment_ids - None

Start an agent to pick up flows from the created work queue:
    prefect agent start -q 'abcde'

Inspect the created work queue:
    prefect work-queue inspect 'abcde'

```


Additionally:

* Wrap generated arguments in apostrophes (') to ensure validity by suffixing formats with `!r`.
* Indent example commands, instead of wrapping in backticks (\`).
* Use placeholders if the example cannot be pre-formatted completely.
* Capitalize placeholder labels and wrap them in less than (\<) and greater than (>) signs.
* Utilize `textwrap.dedent` to remove extraneous spacing for strings that are written with triple quotes (""").
+

Placeholder Example:

```
Create a work queue with tags:
    prefect work-queue create '<WORK QUEUE NAME>' -t '<OPTIONAL TAG 1>' -t '<OPTIONAL TAG 2>'

```


Dedent Example:

```python
from textwrap import dedent
...
output_msg = dedent(
    f"""
    Created work queue with properties:
        name - {name!r}
        uuid - {result}
        tags - {tags or None}
        deployment_ids - {deployment_ids or None}

    Start an agent to pick up flows from the created work queue:
        prefect agent start -q {name!r}

    Inspect the created work queue:
        prefect work-queue inspect {name!r}
    """
)

```


API Versioning
---------------------------------------------------

The Prefect client can be run separately from the Prefect orchestration server and communicate entirely via an API. Among other things, the Prefect client includes anything that runs task or flow code (e.g., agents and the Python client) or any consumer of Prefect metadata (e.g., the Prefect UI and CLI). The Prefect server stores this metadata and serves it via the REST API.

Sometimes, we make breaking changes to the API (for good reasons). In order to check that a Prefect client is compatible with the API it's making requests to, every API call the client makes includes a three-component `API_VERSION` header with major, minor, and patch versions.

For example, a request with the `X-PREFECT-API-VERSION=3.2.1` header has a major version of `3`, minor version `2`, and patch version `1`.

This version header can be changed by modifying the `API_VERSION` constant in `prefect.server.api.server`.

When making a breaking change to the API, we should consider if the change might be _backwards compatible for clients_, meaning that the previous version of the client would still be able to make calls against the updated version of the server code. This might happen if the changes are purely additive, such as adding a non-critical API route. In these cases, we should make sure to bump the patch version.

In almost all other cases, we should bump the minor version, which denotes a non-backwards-compatible API change. We reserve major version changes for changes that, even when backwards compatible, are significant in some way, such as a major release milestone. \ No newline at end of file diff --git a/docs/2.19.x/community/versioning.mdx b/docs/2.19.x/community/versioning.mdx new file mode 100644 index 000000000000..47b566919300 --- /dev/null +++ b/docs/2.19.x/community/versioning.mdx @@ -0,0 +1,49 @@ +--- +sidebarTitle: Versioning +title: Understanding version numbers +--- +
Versions are composed of three parts: MAJOR.MINOR.PATCH. For example, the version 2.5.0 has a major version of 2, a minor version of 5, and a patch version of 0.

Occasionally, we will add a suffix to the version such as `rc`, `a`, or `b`. These indicate pre-release versions that users can opt into installing to test functionality before it is ready for release.

Each release will increase one of the version numbers. If we increase a number other than the patch version, the versions to the right of it will be reset to zero.

Prefect's versioning scheme
----------------------------------------------------------------------------

Prefect will increase the major version when significant and widespread changes are made to the core product. It is very unlikely that the major version will change without extensive warning.
+ +Prefect will increase the minor version when: + +* Introducing a new concept that changes how Prefect can be used +* Changing an existing concept in a way that fundamentally alters how it is used +* Removing a deprecated feature + +Prefect will increase the patch version when: + +* Making enhancements to existing features +* Fixing behavior in existing features +* Adding new functionality to existing concepts +* Updating dependencies + +Breaking changes and deprecation +--------------------------------------------------------------------------------------- + +A breaking change means that your code will need to change to use a new version of Prefect. We strive to avoid breaking changes in all releases. + +At times, Prefect will deprecate a feature. This means that a feature has been marked for removal in the future. When you use it, you may see warnings that it will be removed. A feature is deprecated when it will no longer be maintained. Frequently, a deprecated feature will have a new and improved alternative. Deprecated features will be retained for at least **3** minor version increases or **6 months**, whichever is longer. We may retain deprecated features longer than this time period. + +Prefect will sometimes include changes to behavior to fix a bug. These changes are not categorized as breaking changes. + +Client compatibility with Prefect +----------------------------------------------------------------------------------------- + +When running a Prefect server, you are in charge of ensuring the version is compatible with those of the clients that are using the server. Prefect aims to maintain backwards compatibility with old clients for each server release. In contrast, sometimes new clients cannot be used with an old server. The new client may expect the server to support functionality that it does not yet include. For this reason, we recommend that all clients are the same version as the server or older. + +For example, a client on 2.1.0 can be used with a server on 2.5.0. A client on 2.5.0 cannot be used with a server on 2.1.0. + +Client compatibility with Cloud +------------------------------------------------------------------------------------- + +Prefect Cloud targets compatibility with all versions of Prefect clients. If you encounter a compatibility issue, please [file a bug report](https://github.com/prefectHQ/prefect/issues/new/choose). \ No newline at end of file diff --git a/docs/2.19.x/concepts/artifacts.mdx b/docs/2.19.x/concepts/artifacts.mdx new file mode 100644 index 000000000000..314a08a8c1d0 --- /dev/null +++ b/docs/2.19.x/concepts/artifacts.mdx @@ -0,0 +1,286 @@ +--- +title: Artifacts +--- + +Artifacts are persisted outputs such as tables, Markdown, or links. They are stored on Prefect Cloud or a Prefect server instance and rendered in the Prefect UI. Artifacts make it easy to track and monitor the objects that your flows produce and update over time. + +![Markdown artifact sales report screenshot](/images/artifacts-1.png) + +Published artifacts may be associated with a particular task run or flow run. Artifacts can also be created outside of any flow run context. + +Whether you're publishing links, Markdown, or tables, artifacts provide a powerful and flexible way to showcase data within your workflows. + +With artifacts, you can easily manage and share information with your team, providing valuable insights and context. 
+

Common use cases for artifacts include:

* Debugging: By publishing data that you care about in the UI, you can easily see when and where your results were written. If an artifact doesn't look the way you expect, you can find out which flow run last updated it, and you can click through a link in the artifact to a storage location (such as an S3 bucket).
* Data quality checks: Artifacts can be used to publish data quality checks from in-progress tasks. This can help ensure that data quality is maintained throughout the pipeline. During long-running tasks such as ML model training, you might use artifacts to publish performance graphs. This can help you visualize how well your models are performing and make adjustments as needed. You can also track the versions of these artifacts over time, making it easier to identify changes in your data.
* Documentation: Artifacts can be used to publish documentation and sample data to help you keep track of your work and share information with your colleagues. For instance, artifacts allow you to add a description to let your colleagues know why this piece of data is important.

Creating artifacts
-----------------------------------------------------------

Creating artifacts allows you to publish data from task and flow runs or outside of a flow run context. Currently, you can render three artifact types: links, Markdown, and tables.


**Artifacts render individually**

Please note that every artifact created within a task will be displayed as an individual artifact in the Prefect UI. This means that each call to `create_link_artifact()` or `create_markdown_artifact()` generates a distinct artifact.

Unlike the `print()` command, where you can concatenate multiple calls to include additional items in a report, within a task, these commands must be used multiple times if necessary.

To create artifacts like reports or summaries using `create_markdown_artifact()`, compile your message string separately and then pass it to `create_markdown_artifact()` to create the complete artifact.


### Creating link artifacts

To create a link artifact, use the `create_link_artifact()` function. To create multiple versions of the same artifact and/or view them on the Artifacts page of the Prefect UI, provide a `key` argument to the `create_link_artifact()` function to track an artifact's history over time. Without a `key`, the artifact will only be visible in the Artifacts tab of the associated flow run or task run.

```python
from prefect import flow, task
from prefect.artifacts import create_link_artifact

@task
def my_first_task():
    create_link_artifact(
        key="irregular-data",
        link="https://nyc3.digitaloceanspaces.com/my-bucket-name/highly_variable_data.csv",
        description="## Highly variable data",
    )

@task
def my_second_task():
    create_link_artifact(
        key="irregular-data",
        link="https://nyc3.digitaloceanspaces.com/my-bucket-name/low_pred_data.csv",
        description="# Low prediction accuracy",
    )

@flow
def my_flow():
    my_first_task()
    my_second_task()

if __name__ == "__main__":
    my_flow()

```


**Tip**

You can specify multiple artifacts with the same key to more easily track something very specific that you care about, such as irregularities in your data pipeline.


After running the above flow, you can find your new artifacts in the Artifacts page of the UI. Click into the "irregular-data" artifact and see all versions of it, along with custom descriptions and links to the relevant data.
![Link artifact details with multiple versions](/images/artifacts-23.png)

Here, you'll also be able to view information about your artifact such as its associated flow run or task run id, previous and future versions of the artifact (multiple artifacts can have the same key in order to show lineage), the data you've stored (in this case a Markdown-rendered link), an optional Markdown description, and when the artifact was created or updated.

To make the links more readable for you and your collaborators, you can pass in a `link_text` argument for your link artifacts:

```python
from prefect import flow
from prefect.artifacts import create_link_artifact

@flow
def my_flow():
    create_link_artifact(
        key="my-important-link",
        link="https://www.prefect.io/",
        link_text="Prefect",
    )

if __name__ == "__main__":
    my_flow()

```


In the above example, the `create_link_artifact` function is used within a flow to create a link artifact with a key of `my-important-link`. The `link` parameter is used to specify the external resource to be linked to, and `link_text` is used to specify the text to be displayed for the link. An optional `description` could also be added for context.

### Creating Markdown artifacts

To create a Markdown artifact, you can use the `create_markdown_artifact()` function. To create multiple versions of the same artifact and/or view them on the Artifacts page of the Prefect UI, provide a `key` argument to the `create_markdown_artifact()` function to track an artifact's history over time. Without a `key`, the artifact will only be visible in the Artifacts tab of the associated flow run or task run.

**Don't indent Markdown**

Markdown in multi-line strings must be unindented to be interpreted correctly.



```python
from prefect import flow, task
from prefect.artifacts import create_markdown_artifact

@task
def markdown_task():
    na_revenue = 500000
    markdown_report = f"""# Sales Report

## Summary

In the past quarter, our company saw a significant increase in sales, with a total revenue of $1,000,000.
This represents a 20% increase over the same period last year.

## Sales by Region

| Region        | Revenue  |
|:--------------|---------:|
| North America | ${na_revenue:,} |
| Europe        | $250,000 |
| Asia          | $150,000 |
| South America | $75,000  |
| Africa        | $25,000  |

## Top Products

1. Product A - $300,000 in revenue
2. Product B - $200,000 in revenue
3. Product C - $150,000 in revenue

## Conclusion

Overall, these results are very encouraging and demonstrate the success of our sales team in increasing revenue
across all regions. However, we still have room for improvement and should focus on further increasing sales in
the coming quarter.
"""
    create_markdown_artifact(
        key="gtm-report",
        markdown=markdown_report,
        description="Quarterly Sales Report",
    )

@flow()
def my_flow():
    markdown_task()


if __name__ == "__main__":
    my_flow()

```


After running the above flow, you should see your "gtm-report" artifact in the Artifacts page of the UI.

![Markdown sales report screenshot](/images/artifacts-3.png)

As with all artifacts, you'll be able to view the associated flow run or task run id, previous and future versions of the artifact, your rendered Markdown data, and your optional Markdown description.

### Creating table artifacts

You can create a table artifact by calling `create_table_artifact()`.
To create multiple versions of the same artifact and/or view them on the Artifacts page of the Prefect UI, provide a `key` argument to the `create_table_artifact()` function to track an artifact's history over time. Without a `key`, the artifact will only be visible in the Artifacts tab of the associated flow run or task run.

Note

The `create_table_artifact()` function accepts a `table` argument, which can be provided as either a list of lists, a list of dictionaries, or a dictionary of lists.

```python
from prefect.artifacts import create_table_artifact

def my_fn():
    highest_churn_possibility = [
        {'customer_id': '12345', 'name': 'John Smith', 'churn_probability': 0.85},
        {'customer_id': '56789', 'name': 'Jane Jones', 'churn_probability': 0.65}
    ]

    create_table_artifact(
        key="personalized-reachout",
        table=highest_churn_possibility,
        description="# Marvin, please reach out to these customers today!"
    )

if __name__ == "__main__":
    my_fn()

```


![Table artifact with customer info](/images/artifacts-4.png)

As you can see, you don't need to create an artifact in a flow run context. You can create one anywhere in a Python script and see it in the Prefect UI.

Managing artifacts
-----------------------------------------------------------

### Reading artifacts

In the Prefect UI, you can view all of the latest versions of your artifacts and click into a specific artifact to see its lineage over time. Additionally, you can inspect all versions of an artifact with a given key from the CLI by running:

```
prefect artifact inspect

```


or view all artifacts by running `prefect artifact ls`.

You can also use the [Prefect REST API](https://app.prefect.cloud/api/docs#tag/Artifacts/operation/read_artifacts_api_accounts__account_id__workspaces__workspace_id__artifacts_filter_post) to programmatically filter your results.

### Fetching artifacts

In Python code, you can retrieve an existing artifact with the `Artifact.get` class method:

```python
from prefect.artifacts import Artifact

my_retrieved_artifact = Artifact.get("my_artifact_key")

```


### Deleting artifacts

You can delete specific artifacts with a given key or id directly from the CLI:

```
prefect artifact delete

```


```
prefect artifact delete --id

```


Alternatively, you can delete artifacts using the [Prefect REST API](https://docs.prefect.io/latest/api-ref/rest-api-reference/#tag/Artifacts/operation/delete_artifact_api_accounts__account_id__workspaces__workspace_id__artifacts__id__delete).

Artifacts API
-------------------------------------------------

Prefect provides the [Prefect REST API](https://docs.prefect.io/latest/api-ref/rest-api-reference/#tag/Artifacts) to allow you to create, read, and delete artifacts programmatically. With the Artifacts API, you can automate the creation and management of artifacts as part of your workflow.
For example, to read the five most recently created Markdown, table, and link artifacts, you can run the following:

```python
import requests

PREFECT_API_URL="https://api.prefect.cloud/api/accounts/abc/workspaces/xyz"
PREFECT_API_KEY="pnu_ghijk"
data = {
    "sort": "CREATED_DESC",
    "limit": 5,
    "artifacts": {
        "key": {
            "exists_": True
        }
    }
}

headers = {"Authorization": f"Bearer {PREFECT_API_KEY}"}
endpoint = f"{PREFECT_API_URL}/artifacts/filter"

response = requests.post(endpoint, headers=headers, json=data)
assert response.status_code == 200
for artifact in response.json():
    print(artifact)

```


If you don't specify a key, or require that a key exists, your query will also return results, which are a type of key-less artifact.

See the rest of the [Prefect REST API documentation](https://app.prefect.cloud/api/docs#tag/Artifacts) on artifacts for more information!
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/automations.mdx b/docs/2.19.x/concepts/automations.mdx
new file mode 100644
index 000000000000..e9c194275ff6
--- /dev/null
+++ b/docs/2.19.x/concepts/automations.mdx
@@ -0,0 +1,922 @@
---
title: Automations
---

Automations in Prefect Cloud enable you to configure [actions](#actions) that Prefect executes automatically based on [trigger](#triggers) conditions.

Potential triggers include the occurrence of events from changes in a flow run's state - or the absence of such events. You can even define your own custom trigger to fire based on an [event](https://docs.prefect.io/cloud/events/) created from a webhook or a custom event defined in Python code.

Potential actions include kicking off flow runs, pausing schedules, and sending custom notifications.

**Automations are only available in Prefect Cloud**

[Notifications](https://docs.prefect.io/concepts/notifications/) in an open-source Prefect server provide a subset of the notification message-sending features available in Automations.


Automations provide a flexible and powerful framework for automatically taking action in response to events.

Automations overview
---------------------------------------------------------------

The **Automations** page provides an overview of all configured automations for your workspace.

![Viewing automations for a workspace in Prefect Cloud.](/images/automations-1.png)

Selecting the toggle next to an automation pauses execution of the automation.

The button next to the toggle provides commands to copy the automation ID, edit the automation, or delete the automation.

Select the name of an automation to view **Details** about it and relevant **Events**.

Create an automation
---------------------------------------------------------------

On the **Automations** page, select the **+** icon to create a new automation. You'll be prompted to configure:

* A [trigger](#triggers) condition that causes the automation to execute.
* One or more [actions](#actions) carried out by the automation.
* [Details](#details) about the automation, such as a name and description.

### Triggers

Triggers specify the conditions under which your action should be performed.
The Prefect UI includes templates for many common conditions, such as:

* Flow run state change (note: flow run tags are currently only evaluated with `OR` criteria)
* Work pool status
* Work queue status
* Deployment status
* Metric thresholds, such as average duration, lateness, or completion percentage
* Incident declarations (available on Pro and Custom plans)
* Custom event triggers


**Automations API**

The [automations API](https://app.prefect.cloud/api/docs#tag/Automations) enables further programmatic customization of trigger and action policies based on arbitrary [events](https://app.prefect.cloud/api/docs#tag/Events).



Importantly, triggers can be configured not only in reaction to events, but also proactively: to fire in the absence of an expected event.

![Configuring a trigger for an automation in Prefect Cloud.](/images/automations-2.png)

For example, in the case of flow run state change triggers, you might expect production flows to finish in no longer than thirty minutes. But transient infrastructure or network issues could cause your flow to get “stuck” in a running state. A trigger could kick off an action if the flow stays in a running state for more than 30 minutes. This action could be taken on the flow run itself, such as canceling or restarting it. Or the action could take the form of a notification so someone can take manual remediation steps. Or you could set both actions to take place when the trigger occurs.

### Actions

Actions specify what your automation does when its trigger criteria are met. Current action types include:

* Cancel a flow run
* Pause or resume a schedule
* Run a deployment
* Pause or resume a deployment schedule
* Pause or resume a work pool
* Pause or resume a work queue
* Pause or resume an automation
* Send a [notification](#automation-notifications)
* Call a webhook
* Suspend a flow run
* Declare an incident (available on Pro and Custom plans)
* Change the state of a flow run

![Configuring an action for an automation in Prefect Cloud.](/images/automations-3.png)

### Creating automations in Python code

You can create and access any automation with the Python SDK's `Automation` class and its methods.
```python
from prefect.automations import Automation
from prefect.events.schemas.automations import EventTrigger
from prefect.server.events.actions import CancelFlowRun

# creating an automation
automation = Automation(
    name="woodchonk",
    trigger=EventTrigger(
        expect={"animal.walked"},
        match={
            "genus": "Marmota",
            "species": "monax",
        },
        posture="Reactive",
        threshold=3,
    ),
    actions=[CancelFlowRun()],
).create()
print(automation)
# name='woodchonk' description='' enabled=True trigger=EventTrigger(type='event', match=ResourceSpecification(__root__={'genus': 'Marmota', 'species': 'monax'}), match_related=ResourceSpecification(__root__={}), after=set(), expect={'animal.walked'}, for_each=set(), posture=Posture.Reactive, threshold=3, within=datetime.timedelta(seconds=10)) actions=[CancelFlowRun(type='cancel-flow-run')] actions_on_trigger=[] actions_on_resolve=[] owner_resource=None id=UUID('d641c552-775c-4dc6-a31e-541cb11137a6')

# reading the automation back, either by id ...
automation = Automation.read(id="d641c552-775c-4dc6-a31e-541cb11137a6")
# ... or by name
automation = Automation.read("woodchonk")

print(automation)
# name='woodchonk' description='' enabled=True trigger=EventTrigger(type='event', match=ResourceSpecification(__root__={'genus': 'Marmota', 'species': 'monax'}), match_related=ResourceSpecification(__root__={}), after=set(), expect={'animal.walked'}, for_each=set(), posture=Posture.Reactive, threshold=3, within=datetime.timedelta(seconds=10)) actions=[CancelFlowRun(type='cancel-flow-run')] actions_on_trigger=[] actions_on_resolve=[] owner_resource=None id=UUID('d641c552-775c-4dc6-a31e-541cb11137a6')

```


### Selected and inferred action targets

Some actions require you to either select the target of the action, or specify that the target of the action should be inferred.

Selected targets are simple, and useful for when you know exactly what object your action should act on — for example, the case of a cleanup flow you want to run or a specific notification you’d like to send.

Inferred targets are deduced from the trigger itself.

For example, if a trigger fires on a flow run that is stuck in a running state, and the action is to cancel an inferred flow run, the flow run to cancel is inferred as the stuck run that caused the trigger to fire.

Similarly, if a trigger fires on a work queue event and the corresponding action is to pause an inferred work queue, the inferred work queue is the one that emitted the event.

Prefect tries to infer the relevant event whenever possible, but sometimes one does not exist.

### Details

Specify a name and, optionally, a description for the automation.

Custom triggers
-----------------------------------------------------

When you need a trigger that doesn't quite fit the templates in the UI trigger builder, you can define a custom trigger in JSON. With custom triggers, you have access to the full capabilities of Prefect's automation system - allowing you to react to many kinds of events and metrics in your workspace.

Each automation has a single trigger that, when fired, will cause all of its associated actions to run. That single trigger may be a reactive or proactive event trigger, a trigger monitoring the value of a metric, or a composite trigger that combines several underlying triggers.

### Event triggers

Event triggers are the most common types of trigger, and they are intended to react to the presence or absence of an event happening in your workspace.
Event triggers are indicated with `{"type": "event"}`.

![Viewing a custom trigger for automations for a workspace in Prefect Cloud.](/images/automations-4.png)

The schema that defines an event trigger is as follows:



| Name              | Type             | Supports trailing wildcards | Description |
|-------------------|------------------|-----------------------------|-------------|
| **match**         | object           | ✓                           | Labels for resources which this Automation will match. |
| **match_related** | object           | ✓                           | Labels for related resources which this Automation will match. |
| **posture**       | string enum      | N/A                         | The posture of this Automation, either Reactive or Proactive. Reactive automations respond to the presence of the expected events, while Proactive automations respond to the absence of those expected events. |
| **after**         | array of strings | ✓                           | Event(s), one of which must have first been seen to start this automation. |
| **expect**        | array of strings | ✓                           | The event(s) this automation is expecting to see. If empty, this automation will evaluate any matched event. |
| **for_each**      | array of strings | ✓                           | Evaluate the Automation separately for each distinct value of these labels on the resource. By default, labels refer to the primary resource of the triggering event. You may also refer to labels from related resources by specifying `related:<role>:<label>`. This will use the value of that label for the first related resource in that role. |
| **threshold**     | integer          | N/A                         | The number of events required for this Automation to trigger (for Reactive automations), or the number of events expected (for Proactive automations). |
| **within**        | number           | N/A                         | The time period over which the events must occur. For Reactive triggers, this may be as low as 0 seconds, but must be at least 10 seconds for Proactive triggers. |

### Resource matching

Both the `event` and `metric` triggers support matching events for specific resources in your workspace, including most Prefect objects (like flows, deployments, blocks, work pools, tags, etc.) as well as resources you have defined in any events you emit yourself. The `match` and `match_related` fields control which events a trigger considers for evaluation by filtering on the contents of their `resource` and `related` fields, respectively. Each label added to a `match` filter is `AND`ed with the other labels, and can accept a single value or a list of multiple values that are `OR`ed together.

Consider the `resource` and `related` fields on the following `prefect.flow-run.Completed` event, truncated for the sake of example.
Its primary resource is a flow run, and since that flow run was started via a deployment, it is related to both its flow and its deployment: + +```python +"resource": { + "prefect.resource.id": "prefect.flow-run.925eacce-7fe5-4753-8f02-77f1511543db", + "prefect.resource.name": "cute-kittiwake" +} +"related": [ + { + "prefect.resource.id": "prefect.flow.cb6126db-d528-402f-b439-96637187a8ca", + "prefect.resource.role": "flow", + "prefect.resource.name": "hello" + }, + { + "prefect.resource.id": "prefect.deployment.37ca4a08-e2d9-4628-a310-cc15a323378e", + "prefect.resource.role": "deployment", + "prefect.resource.name": "example" + } +] + +``` + + +There are a number of valid ways to select the above event for evaluation, and the approach depends on the purpose of the automation. + +The following configuration will filter for any events whose primary resource is a flow run, _and_ that flow run has a name starting with `cute-` or `radical-`. + +```python +"match": { + "prefect.resource.id": "prefect.flow-run.*", + "prefect.resource.name": ["cute-*", "radical-*"] +}, +"match_related": {}, +... + +``` + + +This configuration, on the other hand, will filter for any events for which this specific deployment is a related resource. + +``` +"match": {}, +"match_related": { + "prefect.resource.id": "prefect.deployment.37ca4a08-e2d9-4628-a310-cc15a323378e" +}, +... + +``` + + +Both of the above approaches will select the example `prefect.flow-run.Completed` event, but will permit additional, possibly undesired events through the filter as well. `match` and `match_related` can be combined for more restrictive filtering: + +``` +"match": { + "prefect.resource.id": "prefect.flow-run.*", + "prefect.resource.name": ["cute-*", "radical-*"] +}, +"match_related": { + "prefect.resource.id": "prefect.deployment.37ca4a08-e2d9-4628-a310-cc15a323378e" +}, +... + +``` + + +Now this trigger will filter only for events whose primary resource is a flow run started by a specific deployment, _and_ that flow run has a name starting with `cute-` or `radical-`. + +### Expected events + +Once an event has passed through the `match` filters, it must be decided if this event should be counted toward the trigger's `threshold`. Whether that is the case is determined by the event names present in `expect`. + +This configuration informs the trigger to evaluate _only_ `prefect.flow-run.Completed` events that have passed the `match` filters. + +```python +"expect": [ + "prefect.flow-run.Completed" +], +... + +``` + + +`threshold` decides the quantity of `expect`ed events needed to satisfy the trigger. Increasing the `threshold` above 1 will also require use of `within` to define a range of time in which multiple events are seen. The following configuration will expect two occurrences of `prefect.flow-run.Completed` within 60 seconds. + +```python +"expect": [ + "prefect.flow-run.Completed" +], +"threshold": 2, +"within": 60, +... + +``` + + +`after` can be used to handle scenarios that require more complex event reactivity. 
Take, for example, this flow which emits an event indicating the table it operates on is missing or empty:

```python
from prefect import flow
from prefect.events import emit_event
from db import Table


@flow
def transform(table_name: str):
    table = Table(table_name)

    if not table.exists():
        emit_event(
            event="table-missing",
            resource={"prefect.resource.id": "etl-events.transform"}
        )
    elif table.is_empty():
        emit_event(
            event="table-empty",
            resource={"prefect.resource.id": "etl-events.transform"}
        )
    else:
        # transform data
        ...

```


The following configuration uses `after` to prevent this automation from firing unless either a `table-missing` or a `table-empty` event has occurred before a flow run of this deployment completes.


**Tip**

Note how `match` and `match_related` are used to ensure the trigger only evaluates events that are relevant to its purpose.


```python
"match": {
    "prefect.resource.id": [
        "prefect.flow-run.*",
        "etl-events.transform"
    ]
},
"match_related": {
    "prefect.resource.id": "prefect.deployment.37ca4a08-e2d9-4628-a310-cc15a323378e"
},
"after": [
    "table-missing",
    "table-empty"
],
"expect": [
    "prefect.flow-run.Completed"
],
...

```


### Evaluation strategy

All of the previous examples were designed around a reactive `posture` - that is, count up events toward the `threshold` until it is met, then execute actions. To respond to the absence of events, use a proactive `posture`. A proactive trigger will fire when its `threshold` has _not_ been met by the end of the window of time defined by `within`. Proactive triggers must have a `within` value of at least 10 seconds.

The following trigger will fire if a `prefect.flow-run.Completed` event is not seen within 60 seconds after a `prefect.flow-run.Running` event is seen.

```python
{
  "match": {
    "prefect.resource.id": "prefect.flow-run.*"
  },
  "match_related": {},
  "after": [
    "prefect.flow-run.Running"
  ],
  "expect": [
    "prefect.flow-run.Completed"
  ],
  "for_each": [],
  "posture": "Proactive",
  "threshold": 1,
  "within": 60
}

```


However, without `for_each`, a `prefect.flow-run.Completed` event from a _different_ flow run than the one that started this trigger with its `prefect.flow-run.Running` event could satisfy the condition. Adding a `for_each` of `prefect.resource.id` will cause this trigger to be evaluated separately for each flow run id associated with these events.

```python
{
  "match": {
    "prefect.resource.id": "prefect.flow-run.*"
  },
  "match_related": {},
  "after": [
    "prefect.flow-run.Running"
  ],
  "expect": [
    "prefect.flow-run.Completed"
  ],
  "for_each": [
    "prefect.resource.id"
  ],
  "posture": "Proactive",
  "threshold": 1,
  "within": 60
}

```


### Metric triggers

Metric triggers (`{"type": "metric"}`) fire when the value of a metric in your workspace crosses a threshold you've defined. For example, you can trigger an automation when the success rate of flows in your workspace drops below 95% over the course of an hour.

Prefect's metrics are all derived by examining your workspace's events, and if applicable, use the `occurred` times of those events as the basis for their calculations.

Prefect defines three metrics today:

* **Successes** (`{"name": "successes"}`), defined as the number of flow runs that went `Pending` and then the latest state we saw was not a failure (`Failed` or `Crashed`). This metric accounts for retries if the ultimate state was successful.
* **Duration** (`{"name": "duration"}`), defined as the _length of time_ that a flow remains in a `Running` state before transitioning to a terminal state such as `Completed`, `Failed`, or `Crashed`. Because this time is derived in terms of flow run state change events, it may be greater than the runtime of your function.
* **Lateness** (`{"name": "lateness"}`), defined as the _length of time_ that a `Scheduled` flow remains in a `Late` state before transitioning to a `Running` and/or `Crashed` state. Only flow runs that the system marks `Late` are included.

The schema of a metric trigger is as follows:



| Name              | Type                 | Supports trailing wildcards | Description |
|-------------------|----------------------|-----------------------------|-------------|
| **match**         | object               | ✓                           | Labels for resources which this Automation will match. |
| **match_related** | object               | ✓                           | Labels for related resources which this Automation will match. |
| **metric**        | `MetricTriggerQuery` | N/A                         | The definition of the metric query to run |

And the `MetricTriggerQuery` query is defined as:



| Name           | Type                                   | Description |
|----------------|----------------------------------------|-------------|
| **name**       | string                                 | The name of the Prefect metric to evaluate (see above). |
| **threshold**  | number                                 | The threshold to which the current metric value is compared |
| **operator**   | string (`"<"`, `"<="`, `">"`, `">="`)  | The comparison operator to use to decide if the threshold value is met |
| **range**      | duration in seconds                    | How far back to evaluate the metric |
| **firing_for** | duration in seconds                    | How long the value must exceed the threshold before this trigger fires |


For example, to fire when flow runs tagged `production` in your workspace are failing at a rate of 10% or worse for the last hour (in other words, your success rate is below 90%), create this trigger:

```python
{
  "type": "metric",
  "match": {
    "prefect.resource.id": "prefect.flow-run.*"
  },
  "match_related": {
    "prefect.resource.id": "prefect.tag.production",
    "prefect.resource.role": "tag"
  },
  "metric": {
    "name": "successes",
    "threshold": 0.9,
    "operator": "<",
    "range": 3600,
    "firing_for": 0
  }
}

```


To detect when the average lateness of your Kubernetes workloads (running on a work pool named `kubernetes`) in the last day exceeds 5 minutes late, and that number hasn't gotten better for the last 10 minutes, use a trigger like this:

```python
{
  "type": "metric",
  "match": {
    "prefect.resource.id": "prefect.flow-run.*"
  },
  "match_related": {
    "prefect.resource.id": "prefect.work-pool.kubernetes",
    "prefect.resource.role": "work-pool"
  },
  "metric": {
    "name": "lateness",
    "threshold": 300,
    "operator": ">",
    "range": 86400,
    "firing_for": 600
  }
}

```


### Composite triggers

To create a trigger from multiple kinds of events and metrics, use a `compound` or `sequence` trigger. These higher-order triggers are composed from a set of underlying `event` and `metric` triggers.
+ +For example, if you want to run a deployment only after three different flows in your workspace have written their results to a remote filesystem, combine them with a 'compound' trigger: + +```python +{ + "type": "compound", + "require": "all", + "within": 3600, + "triggers": [ + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-customer-export", + "prefect.resource.role": "flow" + } + }, + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-revenue-export", + "prefect.resource.role": "flow" + } + }, + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-expenses-export", + "prefect.resource.role": "flow" + } + } + ] +} + +``` + + +This trigger will fire once it sees at least one of each of the underlying event triggers fire within the time frame specified. Then the trigger will reset its state and fire the next time these three events all happen. The order the events occur doesn't matter, just that all of the events occur within one hour. + +If you want a flow run to complete prior to starting to watch for those three events, you can combine the entire previous trigger as the second part of a sequence of two triggers: + +```python +{ + // the outer trigger is now a "sequence" trigger + "type": "sequence", + "within": 7200, + "triggers": [ + // with the first child trigger expecting a Completed event + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.flow-run.Completed"], + "match_related": { + "prefect.resource.name": "daily-export-initiator", + "prefect.resource.role": "flow" + } + }, + // and the second child trigger being the compound trigger from the prior example + { + "type": "compound", + "require": "all", + "within": 3600, + "triggers": [ + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-customer-export", + "prefect.resource.role": "flow" + } + }, + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-revenue-export", + "prefect.resource.role": "flow" + } + }, + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.block.remote-file-system.write_path.called"], + "match_related": { + "prefect.resource.name": "daily-expenses-export", + "prefect.resource.role": "flow" + } + } + ] + } + ] +} + +``` + + +In this case, the trigger will only fire if it sees the `daily-export-initiator` flow complete, and then the three files written by the other flows. + +The `within` parameter for compound and sequence triggers constrains how close in time (in seconds) the child triggers must fire to satisfy the composite trigger. For example, if the `daily-export-initiator` flow runs, but the other three flows don't write their result files until three hours later, this trigger won't fire. Placing these time constraints on the triggers can prevent a misfire if you know that the events will generally happen within a specific timeframe, and you don't want a stray older event to be included in the evaluation of the trigger. 
If this isn't a concern for you, you may omit the `within` period, in which case there is no limit to how far apart in time the child triggers occur. + +Any type of trigger may be composed into higher-order composite triggers, including proactive event triggers and metric triggers. In the following example, the compound trigger will fire if any of the following events occur: a flow run stuck in `Pending`, a work pool becoming unready, or the average amount of `Late` work in your workspace going over 10 minutes: + +```python +{ + "type": "compound", + "require": "any", + "triggers": [ + { + "type": "event", + "posture": "Proactive", + "after": ["prefect.flow-run.Pending"], + "expect": ["prefect.flow-run.Running", "prefect.flow-run.Crashed"], + "for_each": ["prefect.resource.id"], + "match_related": { + "prefect.resource.name": "daily-customer-export", + "prefect.resource.role": "flow" + } + }, + { + "type": "event", + "posture": "Reactive", + "expect": ["prefect.work-pool.not-ready"], + "match": { + "prefect.resource.name": "kubernetes-workers", + } + }, + { + "type": "metric", + "metric": { + "name": "lateness", + "operator": ">", + "threshold": 600, + "range": 3600, + "firing_for": 300 + } + } + ] +} + +``` + + +For compound triggers, the `require` parameter may be `"any"`, `"all"`, or a number between 1 and the number of child triggers. In the example above, if you feel that you are receiving too many spurious notifications for issues that resolve on their own, you can specify `{"require": 2}` to express that any **two** of the triggers must fire in order for the compound trigger to fire. Sequence triggers, on the other hand, always require all of their child triggers to fire before they fire. + +Compound triggers are defined as: + + + +| Name | Type | Description | +|--------------|-----------------------------|-------------------------------------------------------------------------| +| **require** | number, `"any"`, or `"all"` | How many of the child triggers must fire for this trigger to fire | +| **within** | time, in seconds | How close in time the child triggers must fire for this trigger to fire | +| **triggers** | array of other triggers | | + + +Sequence triggers are defined as: + + +| Name | Type | Description | +|--------------|-------------------------|-------------------------------------------------------------------------| +| **within** | time, in seconds | How close in time the child triggers must fire for this trigger to fire | +| **triggers** | array of other triggers | | + + + +Create an automation via deployment triggers +--------------------------------------------------------------------------------------------------------------- + +To enable the simple configuration of event-driven deployments, Prefect provides deployment triggers - a shorthand for creating automations that are linked to specific deployments to run them based on the presence or absence of events. + +Trigger definitions for deployments are supported in `prefect.yaml`, `.serve`, and `.deploy`. At deployment time, specified trigger definitions will create linked automations that are triggered by events matching your chosen [grammar](https://docs.prefect.io/concepts/events/#event-grammar). Each trigger definition may include a [jinja template](https://en.wikipedia.org/wiki/Jinja_(template_engine)) to render the triggering `event` as the `parameters` of your deployment's flow run. 
### Defining triggers in `prefect.yaml`

A list of triggers can be included directly on any deployment in a `prefect.yaml` file:

```yaml
deployments:
  - name: my-deployment
    entrypoint: path/to/flow.py:decorated_fn
    work_pool:
      name: my-work-pool
    triggers:
      - type: event
        enabled: true
        match:
          prefect.resource.id: my.external.resource
        expect:
          - external.resource.pinged
        parameters:
          param_1: "{{ event }}"

```


This deployment will create a flow run when an `external.resource.pinged` event _and_ an `external.resource.replied` event have been seen from `my.external.resource`:

```yaml
deployments:
  - name: my-deployment
    entrypoint: path/to/flow.py:decorated_fn
    work_pool:
      name: my-work-pool
    triggers:
      - type: compound
        require: all
        parameters:
          param_1: "{{ event }}"
        triggers:
          - type: event
            match:
              prefect.resource.id: my.external.resource
            expect:
              - external.resource.pinged
          - type: event
            match:
              prefect.resource.id: my.external.resource
            expect:
              - external.resource.replied

```


### Defining triggers in `.serve` and `.deploy`

For creating deployments with triggers in Python, the trigger types `DeploymentEventTrigger`, `DeploymentMetricTrigger`, `DeploymentCompoundTrigger`, and `DeploymentSequenceTrigger` can be imported from `prefect.events`:

```python
from prefect import flow
from prefect.events import DeploymentEventTrigger


@flow(log_prints=True)
def decorated_fn(param_1: str):
    print(param_1)


if __name__ == "__main__":
    decorated_fn.serve(
        name="my-deployment",
        triggers=[
            DeploymentEventTrigger(
                enabled=True,
                match={"prefect.resource.id": "my.external.resource"},
                expect=["external.resource.pinged"],
                parameters={
                    "param_1": "{{ event }}",
                },
            )
        ],
    )

```


As with prior examples, composite triggers must be supplied with a list of underlying triggers:

```python
from prefect import flow
from prefect.events import DeploymentCompoundTrigger


@flow(log_prints=True)
def decorated_fn(param_1: str):
    print(param_1)


if __name__ == "__main__":
    decorated_fn.deploy(
        name="my-deployment",
        image="my-image-registry/my-image:my-tag",
        triggers=[
            DeploymentCompoundTrigger(
                enabled=True,
                name="my-compound-trigger",
                require="all",
                triggers=[
                    {
                        "type": "event",
                        "match": {"prefect.resource.id": "my.external.resource"},
                        "expect": ["external.resource.pinged"],
                    },
                    {
                        "type": "event",
                        "match": {"prefect.resource.id": "my.external.resource"},
                        "expect": ["external.resource.replied"],
                    },
                ],
                parameters={
                    "param_1": "{{ event }}",
                },
            )
        ],
        work_pool_name="my-work-pool",
    )

```


### Pass triggers to `prefect deploy`

You can pass one or more `--trigger` arguments to `prefect deploy`, which can be either a JSON string or a path to a `.yaml` or `.json` file.
```
# Pass a trigger as a JSON string
prefect deploy -n test-deployment \
  --trigger '{
    "enabled": true,
    "match": {
      "prefect.resource.id": "prefect.flow-run.*"
    },
    "expect": ["prefect.flow-run.Completed"]
  }'

# Pass a trigger using a JSON/YAML file
prefect deploy -n test-deployment --trigger triggers.yaml
prefect deploy -n test-deployment --trigger my_stuff/triggers.json

```


For example, a `triggers.yaml` file could have many triggers defined:

```yaml
triggers:
  - enabled: true
    match:
      prefect.resource.id: my.external.resource
    expect:
      - external.resource.pinged
    parameters:
      param_1: "{{ event }}"
  - enabled: true
    match:
      prefect.resource.id: my.other.external.resource
    expect:
      - some.other.event
    parameters:
      param_1: "{{ event }}"

```


Both of the above triggers would be attached to `test-deployment` after running `prefect deploy`.


**Triggers passed to `prefect deploy` will override any triggers defined in `prefect.yaml`**

While you can define triggers in `prefect.yaml` for a given deployment, triggers passed to `prefect deploy` will take precedence over those defined in `prefect.yaml`.


Note that deployment triggers contribute to the total number of automations in your workspace.

Automation notifications
-----------------------------------------------------------------------

Notifications enable you to set up automation actions that send a message.

Automation notifications support sending notifications via any predefined block that is capable of and configured to send a message. That includes, for example:

* Slack message to a channel
* Microsoft Teams message to a channel
* Email to a configured email address

![Configuring notifications for an automation in Prefect Cloud.](/images/automations-5.png)

Templating with Jinja
-----------------------------------------------------------------

Automation actions can access templated variables through [Jinja](https://palletsprojects.com/p/jinja/) syntax. Templated variables enable you to dynamically include details from an automation trigger, such as a flow or pool name.

Jinja templated variable syntax wraps the variable name in double curly brackets, like this: `{{ variable }}`.

You can access properties of the underlying flow run objects including:

* [flow\_run](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.FlowRun)
* [flow](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.Flow)
* [deployment](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.Deployment)
* [work\_queue](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.WorkQueue)
* [work\_pool](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.WorkPool)

In addition to its native properties, each object includes an `id` along with `created` and `updated` timestamps.

The `flow_run|ui_url` token returns the URL for viewing the flow run in Prefect Cloud.

Here’s an example for something that would be relevant to a flow run state-based notification:

```
Flow run {{ flow_run.name }} entered state {{ flow_run.state.name }}.
    Timestamp: {{ flow_run.state.timestamp }}
    Flow ID: {{ flow_run.flow_id }}
    Flow Run ID: {{ flow_run.id }}
    State message: {{ flow_run.state.message }}

```


The resulting Slack webhook notification would look something like this:

![Configuring notifications for an automation in Prefect Cloud.](/images/automations-6.png)

You could include `flow` and `deployment` properties.

```
Flow run {{ flow_run.name }} for flow {{ flow.name }}
entered state {{ flow_run.state.name }}
with message {{ flow_run.state.message }}

Flow tags: {{ flow_run.tags }}
Deployment name: {{ deployment.name }}
Deployment version: {{ deployment.version }}
Deployment parameters: {{ deployment.parameters }}

```


An automation that reports on work pool status might include notifications using `work_pool` properties.

```
Work pool status alert!

Name: {{ work_pool.name }}
Last polled: {{ work_pool.last_polled }}

```


In addition to those shortcuts for flows, deployments, and work pools, you have access to the automation and the event that triggered the automation. See the [Automations API](https://app.prefect.cloud/api/docs#tag/Automations) for additional details.

```
Automation: {{ automation.name }}
Description: {{ automation.description }}

Event: {{ event.id }}
Resource:
{% for label, value in event.resource %}
{{ label }}: {{ value }}
{% endfor %}
Related Resources:
{% for related in event.related %}
    Role: {{ related.role }}
    {% for label, value in related %}
    {{ label }}: {{ value }}
    {% endfor %}
{% endfor %}

```


Note that this example also illustrates the ability to use Jinja features such as iterator and for loop [control structures](https://jinja.palletsprojects.com/en/3.1.x/templates/#list-of-control-structures) when templating notifications.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/block--agent-based-deployments/agents.mdx b/docs/2.19.x/concepts/block--agent-based-deployments/agents.mdx
new file mode 100644
index 000000000000..be76b0647edd
--- /dev/null
+++ b/docs/2.19.x/concepts/block--agent-based-deployments/agents.mdx
@@ -0,0 +1,106 @@
---
title: Agents
---

**Workers are recommended**


Agents are part of the block-based deployment model. [Work Pools and Workers](https://docs.prefect.io/concepts/work-pools/) simplify the specification of a flow's infrastructure and runtime environment. If you have existing agents, you can [upgrade from agents to workers](https://docs.prefect.io/guides/upgrade-guide-agents-to-workers/) to significantly enhance the experience of deploying flows.


Agent overview
---------------------------------------------------

Agent processes are lightweight polling services that get scheduled work from a [work pool](#work-pool-overview) and deploy the corresponding flow runs.

Agents poll for work every 15 seconds by default. This interval is configurable in your [profile settings](https://docs.prefect.io/concepts/settings/) with the `PREFECT_AGENT_QUERY_INTERVAL` setting.

It is possible for multiple agent processes to be started for a single work pool. Each agent process sends a unique ID to the server to help disambiguate itself and let users know how many agents are active.

### Agent options

Agents are configured to pull work from one or more work pool queues. If the agent references a work queue that doesn't exist, it will be created automatically.
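For example, a minimal sketch of pulling from more than one work queue at once (the queue names `high-priority` and `default` are illustrative):

```
prefect agent start -q high-priority -q default

```
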
Configuration parameters you can specify when starting an agent include:


| Option               | Description |
|----------------------|-------------|
| `--api`              | The API URL for the Prefect server. Default is the value of `PREFECT_API_URL`. |
| `--hide-welcome`     | Do not display the startup ASCII art for the agent process. |
| `--limit`            | Maximum number of flow runs to start simultaneously. [default: None] |
| `--match`, `-m`      | Dynamically matches work queue names with the specified prefix for the agent to pull from, for example `dev-` will match all work queues with a name that starts with `dev-`. [default: None] |
| `--pool`, `-p`       | A work pool name for the agent to pull from. [default: None] |
| `--prefetch-seconds` | The amount of time before a flow run's scheduled start time to begin submission. Default is the value of `PREFECT_AGENT_PREFETCH_SECONDS`. |
| `--run-once`         | Only run agent polling once. By default, the agent runs forever. [default: no-run-once] |
| `--work-queue`, `-q` | One or more work queue names for the agent to pull from. [default: None] |



You must start an agent within an environment that can access or create the infrastructure needed to execute flow runs. Your agent will deploy flow runs to the infrastructure specified by the deployment.


**Prefect must be installed in execution environments**


Prefect must be installed in any environment in which you intend to run the agent or execute a flow run.



**`PREFECT_API_URL` and `PREFECT_API_KEY` settings for agents**

`PREFECT_API_URL` must be set for the environment in which your agent is running or specified when starting the agent with the `--api` flag. You must also have a user or service account with the `Worker` role, which can be configured by setting the `PREFECT_API_KEY`.

If you want an agent to communicate with Prefect Cloud or a Prefect server from a remote execution environment such as a VM or Docker container, you must configure `PREFECT_API_URL` in that environment.


### Starting an agent

Use the `prefect agent start` CLI command to start an agent. You must pass at least one work pool name or match string that the agent will poll for work. If the work pool does not exist, it will be created.

```
prefect agent start -p [work pool name]

```


For example:

```
Starting agent with ephemeral API...
  ___ ___ ___ ___ ___ ___ _____     _   ___ ___ _  _ _____
 | _ \ _ \ __| __| __/ __|_   _|   /_\ / __| __| \| |_   _|
 |  _/   / _|| _|| _| (__  | |    / _ \ (_ | _|| .` | | |
 |_| |_|_\___|_| |___\___| |_|   /_/ \_\___|___|_|\_| |_|

Agent started! Looking for work from work pool 'my-pool'...

```


By default, the agent polls the API specified by the `PREFECT_API_URL` environment variable. To configure the agent to poll from a different server location, use the `--api` flag, specifying the URL of the server.

In addition, agents can match multiple queues in a work pool by providing a `--match` string instead of specifying all of the queues. The agent will poll every queue with a name that starts with the given string. New queues matching this prefix will be found by the agent without needing to restart it.

For example:

```
prefect agent start --match "foo-"

```


This example will poll every work queue that starts with "foo-".
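These options can be combined. As a sketch (the pool name `my-pool` is illustrative), the following starts an agent on a single work pool and caps it at four concurrent flow runs:

```
prefect agent start -p my-pool --limit 4

```
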
### Configuring prefetch

By default, the agent begins submission of flow runs a short time (10 seconds) before they are scheduled to run. This allows time for the infrastructure to be created, so the flow run can start on time. In some cases, infrastructure will take longer than this to actually start the flow run. In these cases, the prefetch can be increased using the `--prefetch-seconds` option or the `PREFECT_AGENT_PREFETCH_SECONDS` setting.

Submission can begin an arbitrary amount of time before the flow run is scheduled to start. If this value is _larger_ than the amount of time it takes for the infrastructure to start, the flow run will _wait_ until its scheduled start time. This allows flow runs to start exactly on time.

Troubleshooting
-----------------------------------------------------

### Agent crash or keyboard interrupt

If the agent process is ended abruptly, you can sometimes have leftover flow runs that were destined for the agent whose process was ended. In the UI, these will show up as pending. You will need to delete these flow runs in order for the restarted agent to begin processing the work queue again. Take note of the flow runs you deleted; you might need to set them to run manually.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/block--agent-based-deployments/deployments.mdx b/docs/2.19.x/concepts/block--agent-based-deployments/deployments.mdx
new file mode 100644
index 000000000000..64e60b3f61a0
--- /dev/null
+++ b/docs/2.19.x/concepts/block--agent-based-deployments/deployments.mdx
@@ -0,0 +1,581 @@
---
title: Block Based Deployments
sidebarTitle: Deployments
---


**Workers are recommended**

This page is about the block-based deployment model. The [Work Pools and Workers](https://docs.prefect.io/concepts/work-pools/) based [deployment model](https://docs.prefect.io/concepts/deployments/) simplifies the specification of a flow's infrastructure and runtime environment. If you have existing agents, you can [upgrade from agents to workers](https://docs.prefect.io/guides/upgrade-guide-agents-to-workers/) to significantly enhance the experience of deploying flows.




We encourage you to check out the new [deployment experience](https://docs.prefect.io/concepts/deployments/) with guided command line prompts and convenient CI/CD with `prefect.yaml` files.

With remote storage blocks, you can package not only your flow code script but also any supporting files, including your custom modules, SQL scripts, and any configuration files needed in your project.

To define how your flow execution environment should be configured, you may either reference pre-configured infrastructure blocks or let Prefect create those automatically for you as anonymous blocks (this happens when you specify the infrastructure type using the `--infra` flag during the build process).


**Work queue affinity improved starting from Prefect 2.0.5**

Until Prefect 2.0.4, tags were used to associate flow runs with work queues. Starting in Prefect 2.0.5, tag-based work queues are deprecated. Instead, work queue names are used to explicitly direct flow runs from deployments into queues.

Note that **backward compatibility is maintained** and work queues that use tag-based matching can still be created and will continue to work. However, those work queues are now considered legacy and we encourage you to use the new behavior by specifying work queues explicitly on agents and deployments.
See [Agents & Work Pools](https://docs.prefect.io/concepts/work-pools/) for details.

Deployments and flows
-----------------------------------------------------------------

Each deployment is associated with a single flow, but any given flow can be referenced by multiple deployments.

Deployments are uniquely identified by the combination of: `flow_name/deployment_name`.

```
graph LR
    F("my_flow"):::yellow -.-> A("Deployment 'daily'"):::tan --> W("my_flow/daily"):::fgreen
    F -.-> B("Deployment 'weekly'"):::gold --> X("my_flow/weekly"):::green
    F -.-> C("Deployment 'ad-hoc'"):::dgold --> Y("my_flow/ad-hoc"):::dgreen
    F -.-> D("Deployment 'trigger-based'"):::dgold --> Z("my_flow/trigger-based"):::dgreen

    classDef gold fill:goldenrod,stroke:goldenrod,stroke-width:4px,color:white
    classDef yellow fill:gold,stroke:gold,stroke-width:4px
    classDef dgold fill:darkgoldenrod,stroke:darkgoldenrod,stroke-width:4px,color:white
    classDef tan fill:tan,stroke:tan,stroke-width:4px,color:white
    classDef fgreen fill:forestgreen,stroke:forestgreen,stroke-width:4px,color:white
    classDef green fill:green,stroke:green,stroke-width:4px,color:white
    classDef dgreen fill:darkgreen,stroke:darkgreen,stroke-width:4px,color:white
```


This enables you to run a single flow with different parameters, based on multiple schedules and triggers, and in different environments. This also enables you to run different versions of the same flow for testing and production purposes.

Deployment definition
-----------------------------------------------------------------

A _deployment definition_ captures the settings for creating a [deployment object](#deployment-api-representation) on the Prefect API. You can create the deployment definition by:

* Running the [`prefect deployment build` CLI command](#create-a-deployment-on-the-cli) with deployment options to create a [`deployment.yaml`](#deploymentyaml) deployment definition file, then running `prefect deployment apply` to create a deployment on the API using the settings in `deployment.yaml`.
* Defining a [`Deployment`](https://docs.prefect.io/api-ref/prefect/deployments/deployments/) Python object, specifying the deployment options as properties of the object, then building and applying the object using methods of `Deployment`.

The minimum required information to create a deployment includes:

* The path and filename of the file containing the flow script.
* The name of the entrypoint flow function — this is the flow function that starts the flow and calls any additional tasks or subflows.
* The name of the deployment.

You may provide additional settings for the deployment. Any settings you do not explicitly specify are inferred from defaults.

Create a deployment on the CLI
-----------------------------------------------------------------------------------

To create a deployment on the CLI, there are two steps:

1. Build the deployment definition file `deployment.yaml`. This step includes uploading your flow to its configured remote storage location, if one is specified.
2. Create the deployment on the API.

### Build the deployment

To build the deployment definition file `deployment.yaml`, run the `prefect deployment build` Prefect CLI command from the folder containing your flow script and any dependencies of the script.
```
$ prefect deployment build [OPTIONS] PATH

```


Path to the flow is specified in the format `path-to-script:flow-function-name` — the path and filename of the flow script file, a colon, then the name of the entrypoint flow function.

For example:

```
$ prefect deployment build -n marvin -p default-agent-pool -q test flows/marvin.py:say_hi

```


When you run this command, Prefect:

* Creates a `marvin_flow-deployment.yaml` file for your deployment based on your flow code and options.
* Uploads your flow files to the configured storage location (local by default).
* Submits your deployment to the work queue `test`. The work queue `test` will be created if it doesn't exist.


**Uploading files may require storage filesystem libraries**


Note that the appropriate filesystem library supporting the storage location must be installed prior to building a deployment with a storage block. For example, the AWS S3 Storage block requires the [`s3fs`](https://s3fs.readthedocs.io/en/latest/) library.




**Ignore files or directories from a deployment**


By default, Prefect uploads _all files_ in the current folder to the configured storage location (local by default) when you build a deployment.

If you want to omit certain files or directories from your deployments, add a `.prefectignore` file to the root directory.

Similar to other `.ignore` files, the syntax supports pattern matching, so an entry of `*.pyc` will ensure all `.pyc` files are ignored by the deployment call when uploading to remote storage.



### Deployment build options

You may specify additional options to further customize your deployment.



* Options: PATH
  * Description: Path, filename, and flow name of the flow definition. (Required)
* Options: --apply, -a
  * Description: When provided, automatically registers the resulting deployment with the API.
* Options: --cron TEXT
  * Description: A cron string that will be used to set a CronSchedule on the deployment. For example, --cron "*/1 * * * *" to create flow runs from that deployment every minute.
* Options: --help
  * Description: Display help for available commands and options.
* Options: --infra-block TEXT, -ib
  * Description: The infrastructure block to use, in block-type/block-name format.
* Options: --infra, -i
  * Description: The infrastructure type to use. (Default is Process)
* Options: --interval INTEGER
  * Description: An integer specifying an interval (in seconds) that will be used to set an IntervalSchedule on the deployment. For example, --interval 60 to create flow runs from that deployment every minute.
* Options: --name TEXT, -n
  * Description: The name of the deployment.
* Options: --output TEXT, -o
  * Description: Optional location for the YAML manifest generated as a result of the build step. You can version-control that file, but it's not required since the CLI can generate everything you need to define a deployment.
* Options: --override TEXT
  * Description: One or more optional infrastructure overrides provided as a dot delimited path. For example, specify an environment variable: env.env_key=env_value. For Kubernetes, specify customizations.
* Options: --param
  * Description: An optional parameter override; values are parsed as JSON strings. For example, --param question=ultimate --param answer=42.
* Options: --params
  * Description: An optional parameter override in a JSON string format.
For example, --params='{"question": "ultimate", "answer": 42}'.
+* Options: --path
+  * Description: An optional path to specify a subdirectory of remote storage to upload to, or to point to a subdirectory of a locally stored flow.
+* Options: --pool TEXT, -p
+  * Description: The work pool that will handle this deployment's runs.
+* Options: --rrule TEXT
+  * Description: An RRule that will be used to set an RRuleSchedule on the deployment. For example, --rrule 'FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR;BYHOUR=9,10,11,12,13,14,15,16,17' to create flow runs from that deployment every hour but only during business hours.
+* Options: --skip-upload
+  * Description: When provided, skips uploading this deployment's files to remote storage.
+* Options: --storage-block TEXT, -sb
+  * Description: The storage block to use, in block-type/block-name or block-type/block-name/path format. Note that the appropriate library supporting the storage filesystem must be installed.
+* Options: --tag TEXT, -t
+  * Description: One or more optional tags to apply to the deployment.
+* Options: --version TEXT, -v
+  * Description: An optional version for the deployment. This could be a git commit hash if you use this command from a CI/CD pipeline.
+* Options: --work-queue TEXT, -q
+  * Description: The work queue that will handle this deployment's runs. It will be created if it doesn't already exist. Defaults to None. Note that if a work queue is not set, work will not be scheduled.
+
+
+### Block identifiers
+
+When specifying a storage block with the `-sb` or `--storage-block` flag, you may specify the block by passing its slug. The storage block slug is formatted as `block-type/block-name`.
+
+For example, `s3/example-block` is the slug for an S3 block named `example-block`.
+
+In addition, when passing the storage block slug, you may pass just the block slug or the block slug and a path.
+
+* `block-type/block-name` indicates just the block, including any path included in the block configuration.
+* `block-type/block-name/path` indicates a storage path in addition to any path included in the block configuration.
+
+When specifying an infrastructure block with the `-ib` or `--infra-block` flag, you specify the block by passing its slug. The infrastructure block slug is formatted as `block-type/block-name`.
+
+
+|Block name        |Block class name|Block type for a slug|
+|------------------|----------------|---------------------|
+|Azure             |Azure           |azure                |
+|Docker Container  |DockerContainer |docker-container     |
+|GitHub            |GitHub          |github               |
+|GCS               |GCS             |gcs                  |
+|Kubernetes Job    |KubernetesJob   |kubernetes-job       |
+|Process           |Process         |process              |
+|Remote File System|RemoteFileSystem|remote-file-system   |
+|S3                |S3              |s3                   |
+|SMB               |SMB             |smb                  |
+|GitLab Repository |GitLabRepository|gitlab-repository    |
+
+
+Note that the appropriate library supporting the storage filesystem must be installed prior to building a deployment with a storage block. For example, the AWS S3 Storage block requires the [`s3fs`](https://s3fs.readthedocs.io/en/latest/) library. See [Storage](https://docs.prefect.io/concepts/storage/) for more information.
+
+### deployment.yaml
+
+A deployment's YAML file configures additional settings needed to create a deployment on the server.
+
+A single flow may have multiple deployments created for it, with different schedules, tags, and so on. A single flow definition may have multiple deployment YAML files referencing it, each specifying different settings. The only requirement is that each deployment must have a unique name.
+
+The default `{flow-name}-deployment.yaml` filename may be changed as needed with the `--output` flag to `prefect deployment build`.
+
+```yaml
+###
+### A complete description of a Prefect Deployment for flow 'Cat Facts'
+###
+name: catfact
+description: null
+version: c0fc95308d8137c50d2da51af138aa23
+# The work queue that will handle this deployment's runs
+work_queue_name: test
+work_pool_name: null
+tags: []
+parameters: {}
+schedule: null
+infra_overrides: {}
+infrastructure:
+  type: process
+  env: {}
+  labels: {}
+  name: null
+  command:
+  - python
+  - -m
+  - prefect.engine
+  stream_output: true
+###
+### DO NOT EDIT BELOW THIS LINE
+###
+flow_name: Cat Facts
+manifest_path: null
+storage: null
+path: /Users/terry/test/testflows/catfact
+entrypoint: catfact.py:catfacts_flow
+parameter_openapi_schema:
+  title: Parameters
+  type: object
+  properties:
+    url:
+      title: url
+  required:
+  - url
+  definitions: null
+
+```
+
+
+**Editing deployment.yaml**
+
+
+See the big **DO NOT EDIT** comment in your deployment's YAML: in practice, anything above this block can be freely edited _before_ running `prefect deployment apply` to create the deployment on the API.
+
+We recommend editing most of these fields from the CLI or Prefect UI for convenience.
+
+
+### Parameters in deployments
+
+You may provide default parameter values in the `deployment.yaml` configuration, and these parameter values will be used for flow runs based on the deployment.
+
+To configure default parameter values, add them to the `parameters: {}` line of `deployment.yaml` as JSON key-value pairs. The parameter list configured in `deployment.yaml` _must_ match the parameters expected by the entrypoint flow function.
+
+```
+parameters: {"name": "Marvin", "num": 42, "url": "https://catfact.ninja/fact"}
+
+```
+
+
+**Passing \*\*kwargs as flow parameters**
+
+You may pass `**kwargs` as a deployment parameter as a `"kwargs":{}` JSON object containing the key-value pairs of any passed keyword arguments.
+
+```
+parameters: {"name": "Marvin", "kwargs": {"cattype": "tabby", "num": 42}}
+
+```
+
+
+You can edit default parameters for deployments in the Prefect UI, and you can override default parameter values when creating ad-hoc flow runs via the Prefect UI.
+
+To edit parameters in the Prefect UI, go to the details page for a deployment, then select **Edit** from the commands menu. If you change parameter values, the new values are used for all future flow runs based on the deployment.
+
+To create an ad-hoc flow run with different parameter values, go to the details page for a deployment, select **Run**, then select **Custom**. You will be able to provide custom values for any editable deployment fields. Under **Parameters**, select **Custom**. Provide the new values, then select **Save**. Select **Run** to begin the flow run with custom values.
+
+If you want the Prefect API to verify the parameter values passed to a flow run against the schema defined by `parameter_openapi_schema`, set `enforce_parameter_schema` to `true`.
+
+![Configuring custom parameter values for an ad-hoc flow run](/images/deployments-1.png)
+
+### Create a deployment
+
+When you've configured `deployment.yaml` for a deployment, you can create the deployment on the API by running the `prefect deployment apply` Prefect CLI command.
+
+```
+$ prefect deployment apply catfacts_flow-deployment.yaml
+
+```
+
+
+For example:
+
+```
+$ prefect deployment apply ./catfacts_flow-deployment.yaml
+Successfully loaded 'catfact'
+Deployment '76a9f1ac-4d8c-4a92-8869-615bec502685' successfully created.
+
+```
+
+
+`prefect deployment apply` accepts an optional `--upload` flag that, when provided, uploads this deployment's files to remote storage.
+
+Once the deployment has been created, you'll see it in the [Prefect UI](https://docs.prefect.io/ui/flow-runs/) and can inspect it using the CLI.
+
+```
+$ prefect deployment ls
+                                Deployments
+┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
+┃ Name                           ┃ ID                                   ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
+│ Cat Facts/catfact              │ 76a9f1ac-4d8c-4a92-8869-615bec502685 │
+│ leonardo_dicapriflow/hello_leo │ fb4681d7-aa5a-4617-bf6f-f67e6f964984 │
+└────────────────────────────────┴──────────────────────────────────────┘
+
+```
+
+
+![Viewing deployments in the Prefect UI](/images/deployments-3.png)
+
+When you run a deployed flow with Prefect, the following happens:
+
+* The user runs the deployment, which creates a flow run. (The API creates flow runs automatically for deployments with schedules.)
+* An agent picks up the flow run from a work queue and uses an infrastructure block to create infrastructure for the run.
+* The flow run executes within the infrastructure.
+
+[Agents and work pools](https://docs.prefect.io/concepts/work-pools/) enable the Prefect orchestration engine and API to run deployments in your local execution environments. To execute deployed flow runs, you need to configure at least one agent.
+
+
+**Scheduled flow runs**
+
+Scheduled flow runs will not be created unless the scheduler is running with either Prefect Cloud or a local Prefect server started with `prefect server start`.
+
+Scheduled flow runs will not run unless an appropriate [agent and work pool](https://docs.prefect.io/concepts/work-pools/) are configured.
+
+
+Create a deployment from a Python object
+-------------------------------------------------------------------------------------------------------
+
+You can also create deployments from Python scripts by using the [`prefect.deployments.Deployment`](https://docs.prefect.io/2.19.1/api-ref/prefect/deployments/deployments/#prefect.deployments.deployments.Deployment) class.
+
+Create a new deployment using configuration defaults for an imported flow:
+
+```python
+from my_project.flows import my_flow
+from prefect.deployments import Deployment
+
+deployment = Deployment.build_from_flow(
+    flow=my_flow,
+    name="example-deployment",
+    version=1,
+    work_queue_name="demo",
+    work_pool_name="default-agent-pool",
+)
+deployment.apply()
+
+```
+
+
+Create a new deployment with a pre-defined [storage block](https://docs.prefect.io/concepts/storage/) and an [infrastructure](https://docs.prefect.io/concepts/infrastructure/) override:
+
+```python
+from my_project.flows import my_flow
+from prefect.deployments import Deployment
+from prefect.filesystems import S3
+
+storage = S3.load("dev-bucket") # load a pre-defined block
+
+deployment = Deployment.build_from_flow(
+    flow=my_flow,
+    name="s3-example",
+    version=2,
+    work_queue_name="aws",
+    work_pool_name="default-agent-pool",
+    storage=storage,
+    infra_overrides={
+        "env": {
+            "ENV_VAR": "value"
+        }
+    },
+)
+
+deployment.apply()
+
+```
+
+
+If you have settings that you want to share from an existing deployment, you can load those settings:
+
+```python
+deployment = Deployment(
+    name="a-name-you-used",
+    flow_name="name-of-flow"
+)
+deployment.load() # loads server-side settings
+
+```
+
+
+Once the existing deployment settings are loaded, you may update them as needed by changing deployment properties.
+
+View all of the parameters for the `Deployment` object in the [Python API documentation](https://docs.prefect.io/api-ref/prefect/deployments/deployments/).
+
+Deployment API representation
+---------------------------------------------------------------------------------
+
+When you create a deployment, it is constructed from deployment definition data you provide and additional properties set by client-side utilities.
+
+Deployment properties include:
+
+| Property                     | Description                                                                                                                    |
+|------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
+| `id`                         | An auto-generated UUID ID value identifying the deployment.                                                                    |
+| `created`                    | A `datetime` timestamp indicating when the deployment was created.                                                            |
+| `updated`                    | A `datetime` timestamp indicating when the deployment was last changed.                                                       |
+| `name`                       | The name of the deployment.                                                                                                    |
+| `version`                    | The version of the deployment.                                                                                                 |
+| `description`                | A description of the deployment.                                                                                              |
+| `flow_id`                    | The ID of the flow associated with the deployment.                                                                            |
+| `schedule`                   | An optional schedule for the deployment.                                                                                      |
+| `is_schedule_active`         | Boolean indicating whether the deployment schedule is active. Default is True.                                                 |
+| `infra_overrides`            | One or more optional infrastructure overrides.                                                                                |
+| `parameters`                 | An optional dictionary of parameters for flow runs scheduled by the deployment.                                               |
+| `tags`                       | An optional list of tags for the deployment.                                                                                  |
+| `work_queue_name`            | The optional work queue that will handle the deployment's runs.                                                               |
+| `parameter_openapi_schema`   | JSON schema for flow parameters.                                                                                              |
+| `enforce_parameter_schema`   | Whether the API should validate the parameters passed to a flow run against the schema defined by `parameter_openapi_schema`. |
+| `path`                       | The path to the deployment.yaml file.                                                                                          |
+| `entrypoint`                 | The path to the flow entry point.                                                                                             |
+| `storage_document_id`        | Storage block configured for the deployment.                                                                                  |
+| `infrastructure_document_id` | Infrastructure block configured for the deployment.
|
+
+
+You can inspect a deployment using the CLI with the `prefect deployment inspect` command, referencing the deployment in `flow_name/deployment_name` format.
+
+```
+$ prefect deployment inspect 'Cat Facts/catfact'
+{
+    'id': '76a9f1ac-4d8c-4a92-8869-615bec502685',
+    'created': '2022-07-26T03:48:14.723328+00:00',
+    'updated': '2022-07-26T03:50:02.043238+00:00',
+    'name': 'catfact',
+    'version': '899b136ebc356d58562f48d8ddce7c19',
+    'description': None,
+    'flow_id': '2c7b36d1-0bdb-462e-bb97-f6eb9fef6fd5',
+    'schedule': None,
+    'is_schedule_active': True,
+    'infra_overrides': {},
+    'parameters': {},
+    'tags': [],
+    'work_queue_name': 'test',
+    'parameter_openapi_schema': {
+        'title': 'Parameters',
+        'type': 'object',
+        'properties': {'url': {'title': 'url'}},
+        'required': ['url']
+    },
+    'path': '/Users/terry/test/testflows/catfact',
+    'entrypoint': 'catfact.py:catfacts_flow',
+    'manifest_path': None,
+    'storage_document_id': None,
+    'infrastructure_document_id': 'f958db1c-b143-4709-846c-321125247e07',
+    'infrastructure': {
+        'type': 'process',
+        'env': {},
+        'labels': {},
+        'name': None,
+        'command': ['python', '-m', 'prefect.engine'],
+        'stream_output': True
+    }
+}
+
+```
+
+
+Create a flow run from a deployment
+---------------------------------------------------------------------------------------------
+
+### Create a flow run with a schedule
+
+If you specify a schedule for a deployment, the deployment will execute its flow automatically on that schedule as long as a Prefect server and agent are running. Prefect Cloud creates scheduled flow runs automatically, and they will run on schedule if an agent is configured to pick up flow runs for the deployment.
+
+### Create a flow run with an event trigger
+
+
+**Deployment triggers are only available in Prefect Cloud**
+
+
+Deployments can optionally take a trigger specification, which will configure an automation to run the deployment based on the presence or absence of events, and optionally pass event data into the deployment run as parameters via Jinja templating.
+
+```yaml
+triggers:
+  - enabled: true
+    match:
+      prefect.resource.id: prefect.flow-run.*
+    expect:
+      - prefect.flow-run.Completed
+    match_related:
+      prefect.resource.name: prefect.flow.etl-flow
+      prefect.resource.role: flow
+    parameters:
+      param_1: "{{ event }}"
+
+```
+
+
+When applied, this deployment will start a flow run upon the completion of the upstream flow specified in the `match_related` key, with the flow run passed in as a parameter. Triggers can be configured to respond to the presence or absence of arbitrary internal or external [events](https://docs.prefect.io/cloud/events). The trigger system and API are detailed in [Automations](https://docs.prefect.io/cloud/automations/).
+
+### Create a flow run with Prefect UI
+
+In the Prefect UI, you can click the **Run** button next to any deployment to execute an ad hoc flow run for that deployment.
+
+The `prefect deployment` CLI command provides commands for managing and running deployments locally.
+
+
+|Command        |Description                                                    |
+|---------------|---------------------------------------------------------------|
+|apply          |Create or update a deployment from a YAML file.                |
+|build          |Generate a deployment YAML from /path/to/file.py:flow_function.|
+|delete         |Delete a deployment.                                           |
+|inspect        |View details about a deployment.                               |
+|ls             |View all deployments or deployments for specific flows.        |
+|pause-schedule |Pause schedule of a given deployment.                          |
+|resume-schedule|Resume schedule of a given deployment.
|
+|run            |Create a flow run for the given flow and deployment.          |
+|schedule       |Commands for interacting with your deployment's schedules.    |
+|set-schedule   |Set schedule for a given deployment.                          |
+
+
+**Deprecated schedule commands**
+
+
+The `pause-schedule`, `resume-schedule`, and `set-schedule` commands are deprecated due to the introduction of multi-schedule support for deployments. Use the new `prefect deployment schedule` command for enhanced flexibility and control over your deployment schedules.
+
+### Create a flow run in a Python script
+
+You can create a flow run from a deployment in a Python script with the `run_deployment` function.
+
+```python
+from prefect.deployments import run_deployment
+
+
+def main():
+    response = run_deployment(name="flow-name/deployment-name")
+    print(response)
+
+
+if __name__ == "__main__":
+    main()
+
+```
+
+
+**`PREFECT_API_URL` setting for agents**
+
+
+You'll need to configure [agents and work pools](https://docs.prefect.io/concepts/work-pools/) that can create flow runs for deployments in remote environments. [`PREFECT_API_URL`](https://docs.prefect.io/concepts/settings/#prefect_api_url) must be set for the environment in which your agent is running.
+
+If you want the agent to communicate with Prefect Cloud from a remote execution environment such as a VM or Docker container, you must configure `PREFECT_API_URL` in that environment.
+
+
+Examples
+---------------------------------------
+
+* [How to deploy Prefect flows to AWS](https://discourse.prefect.io/t/how-to-deploy-prefect-2-0-flows-to-aws/1252)
+* [How to deploy Prefect flows to GCP](https://discourse.prefect.io/t/how-to-deploy-prefect-2-0-flows-to-gcp/1251)
+* [How to deploy Prefect flows to Azure](https://discourse.prefect.io/t/how-to-deploy-prefect-2-0-flows-to-azure/1312)
+* [How to deploy Prefect flows using files stored locally](https://discourse.prefect.io/t/how-to-deploy-prefect-2-0-flows-to-run-as-a-local-process-docker-container-or-a-kubernetes-job/1246)
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/block--agent-based-deployments/storage.mdx b/docs/2.19.x/concepts/block--agent-based-deployments/storage.mdx
new file mode 100644
index 000000000000..d00a061b5c67
--- /dev/null
+++ b/docs/2.19.x/concepts/block--agent-based-deployments/storage.mdx
@@ -0,0 +1,119 @@
+---
+title: Storage
+---
+
+
+**Storage blocks are not recommended**
+
+Storage blocks are part of the legacy block-based deployment model. Instead, the recommended options for creating a deployment are the `serve` or `runner`-based Python creation methods, or workers and work pools with `prefect deploy` via the CLI. Flow code storage can be specified in the Python file with `serve` or `runner`-based Python creation methods; alternatively, with the work pools and workers style of flow deployment, you can specify flow code storage during the interactive `prefect deploy` CLI experience and in its resulting `prefect.yaml` file.
+
+
+Storage lets you configure how flow code for deployments is persisted and retrieved by [Prefect workers](https://docs.prefect.io/concepts/work-pools) (or legacy [agents](https://docs.prefect.io/concepts/agents)). Anytime you build a block-based deployment, a storage block is used to upload the entire directory containing your workflow code (along with supporting files) to its configured location. This helps ensure portability of your relative imports, configuration files, and more.
Note that your environment dependencies (for example, external Python packages) still need to be managed separately.
+
+If no storage is explicitly configured, Prefect will use `LocalFileSystem` storage by default. Local storage works fine for many local flow run scenarios, especially when testing and getting started. However, due to the inherent lack of portability, many use cases are better served by using remote storage such as S3 or Google Cloud Storage.
+
+Prefect supports creating multiple storage configurations and switching between them as needed.
+
+**Storage uses blocks**
+
+
+[Blocks](https://docs.prefect.io/concepts/blocks/) are the Prefect technology underlying storage, and they enable you to do much more.
+
+In addition to creating storage blocks via the Prefect CLI, you can now create storage blocks and other kinds of block configuration objects via the [Prefect UI and Prefect Cloud](https://docs.prefect.io/ui/blocks/).
+
+
+Configuring storage for a deployment
+-----------------------------------------------------------------------------------------------
+
+When building a deployment for a workflow, you have two options for configuring workflow storage:
+
+* Use the default local storage
+* Preconfigure a storage block to use
+
+### Using the default
+
+Anytime you call `prefect deployment build` without providing the `--storage-block` flag, a default `LocalFileSystem` block will be used. Note that this block will always use your present working directory as its basepath (which is usually desirable). You can see the block's settings by inspecting the `deployment.yaml` file that Prefect creates after calling `prefect deployment build`.
+
+While you generally can't run a deployment stored on a local file system on other machines, any agent running on the same machine will be able to successfully run your deployment.
+
+### Supported storage blocks
+
+Current options for deployment storage blocks include:
+
+
+
+* Storage: Local File System
+  * Description: Store code in a run's local file system.
+  * Required Library:
+* Storage: Remote File System
+  * Description: Store code in any filesystem supported by fsspec.
+  * Required Library:
+* Storage: AWS S3 Storage
+  * Description: Store code in an AWS S3 bucket.
+  * Required Library: s3fs
+* Storage: Azure Storage
+  * Description: Store code in Azure Data Lake and Azure Blob Storage.
+  * Required Library: adlfs
+* Storage: GitHub Storage
+  * Description: Store code in a GitHub repository.
+  * Required Library:
+* Storage: Google Cloud Storage
+  * Description: Store code in a Google Cloud Platform (GCP) Cloud Storage bucket.
+  * Required Library: gcsfs
+* Storage: SMB
+  * Description: Store code in SMB shared network storage.
+  * Required Library: smbprotocol
+* Storage: GitLab Repository
+  * Description: Store code in a GitLab repository.
+  * Required Library: prefect-gitlab
+* Storage: Bitbucket Repository
+  * Description: Store code in a Bitbucket repository.
+  * Required Library: prefect-bitbucket
+
+
+**Accessing files may require storage filesystem libraries**
+
+
+Note that the appropriate filesystem library supporting the storage location must be installed prior to building a deployment with a storage block or accessing the storage location from flow scripts.
+
+For example, the AWS S3 Storage block requires the [`s3fs`](https://s3fs.readthedocs.io/en/latest/) library.
+
+See [Filesystem package dependencies](https://docs.prefect.io/concepts/filesystems/#filesystem-package-dependencies) for more information about configuring filesystem libraries in your execution environment.
+
+### Configuring a block
+
+You can create these blocks either via the UI or via Python.
+
+You can [create, edit, and manage storage blocks](https://docs.prefect.io/ui/blocks/) in the Prefect UI and Prefect Cloud. On a Prefect server, blocks are created in the server's database. On Prefect Cloud, blocks are created on a workspace.
+
+To create a new block, select the **+** button. Prefect displays a library of block types you can configure to create blocks to be used by your flows.
+
+![Viewing the new block library in the Prefect UI](/images/storage1.png)
+
+Select **Add +** to configure a new storage block based on a specific block type. Prefect displays a **Create** page that enables specifying storage settings.
+
+![Configuring an S3 storage block in the Prefect UI](/images/storage2.png)
+
+You can also create blocks using the Prefect Python API:
+
+```python
+from prefect.filesystems import S3
+
+block = S3(
+    bucket_path="my-bucket/a-sub-directory",
+    aws_access_key_id="foo",
+    aws_secret_access_key="bar",
+)
+block.save("example-block")
+
+```
+
+
+This block configuration is now available to be used by anyone with appropriate access to your Prefect API. We can use this block to build a deployment by passing its slug to the `prefect deployment build` command. The storage block slug is formatted as `block-type/block-name`. In this case, `s3/example-block` for an AWS S3 Bucket block named `example-block`. See [block identifiers](https://docs.prefect.io/concepts/deployments/#block-identifiers) for details.
+
+```
+prefect deployment build ./flows/my_flow.py:my_flow --name "Example Deployment" --storage-block s3/example-block
+
+```
+
+
+This command will upload the contents of your flow's directory to the designated storage location, then the full deployment specification will be persisted to a newly created `deployment.yaml` file. For more information, see [Deployments](https://docs.prefect.io/concepts/deployments).
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/blocks.mdx b/docs/2.19.x/concepts/blocks.mdx
new file mode 100644
index 000000000000..93fa0857d7f7
--- /dev/null
+++ b/docs/2.19.x/concepts/blocks.mdx
@@ -0,0 +1,525 @@
+---
+title: Blocks
+description: Prefect blocks store configuration and provide an interface for interacting with external systems.
+---
+
+Blocks expose methods that provide functionality specific to the systems they interface with. For example, blocks can be used to download data from or upload data to an S3 bucket, query data from or write data to a database, or send a message to a Slack channel.
+
+Overview
+---------------------------------------
+
+Block types are Python classes with a handy UI webform for configuration. Blocks are instantiations of these classes with specific values.
+
+Configure blocks through Python code or via a form in the UI. Access blocks for use in Python code.
+
+Block values are stored in Prefect Cloud or your self-hosted Prefect server instance. Blocks can be shared with other users in your Prefect Cloud workspace.
+
+To see block types available for configuration, use `prefect block type ls` from the CLI or navigate to the **Blocks** page in the UI and click **+**.
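+
+As a quick end-to-end illustration of the configure-then-access pattern described above, here is a minimal sketch using the built-in `Secret` block; the block name `demo-api-token` and the stored value are purely illustrative:
+
+```python
+from prefect.blocks.system import Secret
+
+# Configure: create and persist a block value (typically done once, e.g. in a setup script)
+Secret(value="my-api-token").save("demo-api-token", overwrite=True)
+
+# Access: load the stored value from anywhere that can reach your Prefect API
+secret_block = Secret.load("demo-api-token")
+print(secret_block.get())  # prints the stored secret value
+```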
+
+![The block catalogue in the UI](/images/blocks1.png)
+
+**Blocks and parameters**
+
+Blocks are useful for configuration that needs to be shared across flow runs and between flows.
+
+For configuration that will change between flow runs, we recommend using [parameters](https://docs.prefect.io/concepts/flows/#parameters).
+
+
+
+Prefect built-in blocks
+---------------------------------------------------------------------
+
+Commonly used block types come built-in with Prefect. These block types can be created via the UI and used without installing any additional packages.
+
+
+| Block                     | Slug                        | Description                                                                                         |
+|---------------------------|-----------------------------|-----------------------------------------------------------------------------------------------------|
+| Azure                     | `azure`                     | Stores data as a file on Azure Data Lake and Azure Blob Storage.                                    |
+| Custom Webhook            | `custom-webhook`            | Calls custom webhooks.                                                                              |
+| Discord Webhook           | `discord-webhook`           | Calls Discord webhooks.                                                                             |
+| Date Time                 | `date-time`                 | Stores a datetime value.                                                                            |
+| Docker Container          | `docker-container`          | Runs a command in a container.                                                                      |
+| Docker Registry           | `docker-registry`           | Connects to a Docker registry. Requires a Docker Engine to be connectable.                          |
+| GCS                       | `gcs`                       | Stores data as a file on Google Cloud Storage.                                                      |
+| GitHub                    | `github`                    | Interacts with files stored on public GitHub repositories.                                          |
+| JSON                      | `json`                      | Stores JSON data.                                                                                   |
+| Kubernetes Cluster Config | `kubernetes-cluster-config` | Stores configuration for interaction with Kubernetes clusters.                                      |
+| Kubernetes Job            | `kubernetes-job`            | Runs a command as a Kubernetes Job.                                                                 |
+| Local File System         | `local-file-system`         | Stores data as a file on a local file system.                                                       |
+| Mattermost Webhook        | `mattermost-webhook`        | Sends notifications via a provided Mattermost webhook.                                              |
+| Microsoft Teams Webhook   | `ms-teams-webhook`          | Sends notifications via a provided Microsoft Teams webhook.                                         |
+| Opsgenie Webhook          | `opsgenie-webhook`          | Sends notifications via a provided Opsgenie webhook.                                                |
+| Pager Duty Webhook        | `pager-duty-webhook`        | Sends notifications via a provided PagerDuty webhook.                                               |
+| Process                   | `process`                   | Runs a command in a new process.                                                                    |
+| Remote File System        | `remote-file-system`        | Stores data as a file on any remote file system that supports fsspec.                               |
+| S3                        | `s3`                        | Stores data as a file on AWS S3.                                                                    |
+| Secret                    | `secret`                    | Stores a secret value. The value will be obfuscated when this block is logged or shown in the UI.   |
+| Sendgrid Email            | `sendgrid-email`            | Sends notifications via Sendgrid email.                                                             |
+| Slack Webhook             | `slack-webhook`             | Sends notifications via a provided Slack webhook.                                                   |
+| SMB                       | `smb`                       | Stores data as a file on an SMB share.                                                              |
+| String                    | `string`                    | Stores a string value.                                                                              |
+| Twilio SMS                | `twilio-sms`                | Sends notifications via Twilio SMS.                                                                 |
+
+
+**Warning**
+
+The `S3`, `Azure`, `GCS`, and `GitHub` blocks are deprecated in favor of the corresponding `S3Bucket`, `AzureBlobStorageCredentials`, `GCSBucket`, and `GitHubRepository` blocks found in the [Prefect integration libraries](https://docs.prefect.io/integrations/).
+
+
+Blocks in Prefect integration libraries
+-----------------------------------------------------------------------------------------------------
+
+Some block types that appear in the UI can be created immediately, but the corresponding integration library must be installed before they can be used.
For example, an AWS Secret block can be created, but not used until the [`prefect-aws` library](https://docs.prefect.io/integrations/prefect-aws/) is installed.
+
+Block types can be created by anyone and optionally shared with the community. You'll find block types available for consumption in many of the published [Prefect integration libraries](https://docs.prefect.io/integrations/). If a block type is not available in the UI, you can [register it](#register-blocks) via the CLI.
+
+
+|Integration       |Block                               |Slug                                |
+|------------------|------------------------------------|------------------------------------|
+|prefect-aws       |ECS Task                            |ecs-task                            |
+|prefect-aws       |MinIO Credentials                   |minio-credentials                   |
+|prefect-aws       |S3 Bucket                           |s3-bucket                           |
+|prefect-azure     |Azure Blob Storage Credentials      |azure-blob-storage-credentials      |
+|prefect-azure     |Azure Container Instance Credentials|azure-container-instance-credentials|
+|prefect-azure     |Azure Container Instance Job        |azure-container-instance-job        |
+|prefect-azure     |Azure Cosmos DB Credentials         |azure-cosmos-db-credentials         |
+|prefect-azure     |AzureML Credentials                 |azureml-credentials                 |
+|prefect-bitbucket |BitBucket Credentials               |bitbucket-credentials               |
+|prefect-bitbucket |BitBucket Repository                |bitbucket-repository                |
+|prefect-databricks|Databricks Credentials              |databricks-credentials              |
+|prefect-dbt       |dbt CLI BigQuery Target Configs     |dbt-cli-bigquery-target-configs     |
+|prefect-dbt       |dbt CLI Profile                     |dbt-cli-profile                     |
+|prefect-dbt       |dbt Cloud Credentials               |dbt-cloud-credentials               |
+|prefect-dbt       |dbt CLI Global Configs              |dbt-cli-global-configs              |
+|prefect-dbt       |dbt CLI Postgres Target Configs     |dbt-cli-postgres-target-configs     |
+|prefect-dbt       |dbt CLI Snowflake Target Configs    |dbt-cli-snowflake-target-configs    |
+|prefect-dbt       |dbt CLI Target Configs              |dbt-cli-target-configs              |
+|prefect-docker    |Docker Host                         |docker-host                         |
+|prefect-docker    |Docker Registry Credentials         |docker-registry-credentials         |
+|prefect-email     |Email Server Credentials            |email-server-credentials            |
+|prefect-gcp       |BigQuery Warehouse                  |bigquery-warehouse                  |
+|prefect-gcp       |GCP Cloud Run Job                   |cloud-run-job                       |
+|prefect-gcp       |GCP Credentials                     |gcp-credentials                     |
+|prefect-gcp       |GcpSecret                           |gcpsecret                           |
+|prefect-gcp       |GCS Bucket                          |gcs-bucket                          |
+|prefect-gcp       |Vertex AI Custom Training Job       |vertex-ai-custom-training-job       |
+|prefect-github    |GitHub Credentials                  |github-credentials                  |
+|prefect-github    |GitHub Repository                   |github-repository                   |
+|prefect-gitlab    |GitLab Credentials                  |gitlab-credentials                  |
+|prefect-gitlab    |GitLab Repository                   |gitlab-repository                   |
+|prefect-kubernetes|Kubernetes Credentials              |kubernetes-credentials              |
+|prefect-shell     |Shell Operation                     |shell-operation                     |
+|prefect-slack     |Slack Credentials                   |slack-credentials                   |
+|prefect-slack     |Slack Incoming Webhook              |slack-incoming-webhook              |
+|prefect-snowflake |Snowflake Connector                 |snowflake-connector                 |
+|prefect-snowflake |Snowflake Credentials               |snowflake-credentials               |
+|prefect-sqlalchemy|Database Credentials                |database-credentials                |
+|prefect-sqlalchemy|SQLAlchemy Connector                |sqlalchemy-connector                |
+
+
+Use existing block types
+-----------------------------------------------------------------------
+
+Blocks are classes that subclass the `Block` base class. They can be instantiated and used like normal classes.
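+
+To make that concrete before walking through each operation, here is a minimal sketch using the built-in `JSON` block; until you call an API-facing method such as `.save()`, a block instance behaves like an ordinary Pydantic object:
+
+```python
+from prefect.blocks.system import JSON
+
+# Instantiating a block is just creating a Python object; no API interaction happens yet
+config = JSON(value={"retries": 3})
+print(config.value["retries"])  # 3
+```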
+
+### Instantiate blocks
+
+To instantiate a block that stores a JSON value, use the `JSON` block:
+
+```
+from prefect.blocks.system import JSON
+
+json_block = JSON(value={"the_answer": 42})
+
+```
+
+
+### Save blocks
+
+To store this block value for later retrieval, use the `.save()` method:
+
+```
+json_block.save(name="life-the-universe-everything")
+
+```
+
+
+To update the block value stored for a given block, overwrite the existing block by passing `overwrite=True`:
+
+```
+json_block.save(overwrite=True)
+
+```
+
+
+Create a new JSON block by setting the `name` parameter to a new value:
+
+```
+json_block.save(name="actually-life-the-universe-everything")
+
+```
+
+
+Note that blocks can also be created and updated via the [Prefect UI](https://docs.prefect.io/ui/blocks/).
+
+### Load blocks
+
+The block name can be used to load the block:
+
+```
+from prefect import flow
+from prefect.blocks.system import JSON
+
+@flow
+def what_is_the_answer():
+    json_block = JSON.load("life-the-universe-everything")
+    print(json_block.value["the_answer"])
+
+if __name__ == "__main__":
+    what_is_the_answer() # 42
+
+```
+
+
+Alternatively, load a block with the unique slug that is a combination of the block type slug and the block name.
+
+To load our JSON block from above, run the following:
+
+```
+from prefect.blocks.core import Block
+
+json_block = Block.load("json/life-the-universe-everything")
+print(json_block.value["the_answer"]) # 42
+
+```
+
+
+### Delete blocks
+
+Delete a block with the `.delete()` method:
+
+```
+from prefect.blocks.core import Block
+
+Block.delete("json/life-the-universe-everything")
+
+```
+
+
+Alternatively, use the CLI to delete specific blocks with a given slug or ID:
+
+```
+prefect block delete json/life-the-universe-everything
+
+```
+
+
+```
+prefect block delete --id <BLOCK-ID>
+
+```
+
+
+Creating new block types
+-----------------------------------------------------------------------
+
+To create a custom block type, define a class that subclasses `Block`. The `Block` base class builds on Pydantic's `BaseModel`, so custom blocks can be [declared in the same manner as a Pydantic model](https://pydantic-docs.helpmanual.io/usage/models/#basic-model-usage).
+
+Here's a block that represents a cube and holds information about the length of each edge in inches:
+
+```
+from prefect.blocks.core import Block
+
+class Cube(Block):
+    edge_length_inches: float
+
+```
+
+
+You can include methods on a block to provide functionality. Here's the same cube block with methods to calculate the volume and surface area of the cube:
+
+```
+from prefect.blocks.core import Block
+
+class Cube(Block):
+    edge_length_inches: float
+
+    def get_volume(self):
+        return self.edge_length_inches**3
+
+    def get_surface_area(self):
+        return 6 * self.edge_length_inches**2
+
+```
+
+
+Use the new `Cube` block type in a flow:
+
+```
+from prefect import flow
+
+rubiks_cube = Cube(edge_length_inches=2.25)
+rubiks_cube.save("rubiks-cube")
+
+@flow
+def calculate_cube_surface_area(cube_name):
+    cube = Cube.load(cube_name)
+    print(cube.get_surface_area())
+
+if __name__ == "__main__":
+    calculate_cube_surface_area("rubiks-cube") # 30.375
+
+```
+
+
+### Secret fields
+
+All block values are encrypted before being stored. If you have values that you would not like visible in the UI or in logs, use the `SecretStr` field type provided by Pydantic to automatically obfuscate those values. This functionality can be useful for fields that are used to store credentials such as passwords and API tokens.
+
+Here's an example of an `AWSCredentials` block that uses `SecretStr`:
+
+```
+from typing import Optional
+
+from prefect.blocks.core import Block
+from pydantic import SecretStr  # if pydantic version >= 2.0, use: from pydantic.v1 import SecretStr
+
+class AWSCredentials(Block):
+    aws_access_key_id: Optional[str] = None
+    aws_secret_access_key: Optional[SecretStr] = None
+    aws_session_token: Optional[str] = None
+    profile_name: Optional[str] = None
+    region_name: Optional[str] = None
+
+```
+
+
+Because `aws_secret_access_key` has the `SecretStr` type hint assigned to it, the value of that field will not be exposed if the object is logged:
+
+```
+aws_credentials_block = AWSCredentials(
+    aws_access_key_id="AKIAJKLJKLJKLJKLJKLJK",
+    aws_secret_access_key="secret_access_key"
+)
+
+print(aws_credentials_block)
+# aws_access_key_id='AKIAJKLJKLJKLJKLJKLJK' aws_secret_access_key=SecretStr('**********') aws_session_token=None profile_name=None region_name=None
+
+```
+
+
+Prefect's `SecretDict` field type allows you to add a dictionary field to your block that will have values at all levels automatically obfuscated in the UI or in logs. This functionality is useful for blocks where the typing or structure of secret fields is not known until configuration time.
+
+Here's an example of a block that uses `SecretDict`:
+
+```
+from typing import Dict
+
+from prefect.blocks.core import Block
+from prefect.blocks.fields import SecretDict
+
+
+class SystemConfiguration(Block):
+    system_secrets: SecretDict
+    system_variables: Dict
+
+
+system_configuration_block = SystemConfiguration(
+    system_secrets={
+        "password": "p@ssw0rd",
+        "api_token": "token_123456789",
+        "private_key": "",
+    },
+    system_variables={
+        "self_destruct_countdown_seconds": 60,
+        "self_destruct_countdown_stop_time": 7,
+    },
+)
+
+```
+
+
+`system_secrets` will be obfuscated when `system_configuration_block` is displayed, but `system_variables` will be shown in plain-text:
+
+```
+print(system_configuration_block)
+# SystemConfiguration(
+#   system_secrets=SecretDict('{'password': '**********', 'api_token': '**********', 'private_key': '**********'}'),
+#   system_variables={'self_destruct_countdown_seconds': 60, 'self_destruct_countdown_stop_time': 7}
+# )
+
+```
+
+
+### Block type metadata
+
+The way that a block is displayed can be controlled by metadata fields that can be set on a block type's subclass.
+
+Available metadata fields include:
+
+
+
+* Property: _block_type_name
+  * Description: Display name of the block in the UI. Defaults to the class name.
+* Property: _block_type_slug
+  * Description: Unique slug used to reference the block type in the API. Defaults to a lowercase, dash-delimited version of the block type name.
+* Property: _logo_url
+  * Description: URL pointing to an image that should be displayed for the block type in the UI. Defaults to None.
+* Property: _description
+  * Description: Short description of the block type. Defaults to the docstring, if provided.
+* Property: _code_example
+  * Description: Short code snippet shown in the UI for how to load/use the block type. Defaults to the first example provided in the docstring of the class, if provided.
+
+
+### Nested blocks
+
+Blocks are composable: a block can be used within other blocks. You can create a block type that uses functionality from another block type by declaring it as an attribute.
+
+Nestable blocks are loosely coupled, as configuration can be changed for each block independently. This allows configuration to be shared across multiple use cases.
To illustrate, here's an expanded `AWSCredentials` block that includes the ability to get an authenticated session via the `boto3` library:
+
+```
+from typing import Optional
+
+import boto3
+from prefect.blocks.core import Block
+from pydantic import SecretStr
+
+class AWSCredentials(Block):
+    aws_access_key_id: Optional[str] = None
+    aws_secret_access_key: Optional[SecretStr] = None
+    aws_session_token: Optional[str] = None
+    profile_name: Optional[str] = None
+    region_name: Optional[str] = None
+
+    def get_boto3_session(self):
+        return boto3.Session(
+            aws_access_key_id=self.aws_access_key_id,
+            # unwrap the SecretStr so boto3 receives a plain string
+            aws_secret_access_key=(
+                self.aws_secret_access_key.get_secret_value()
+                if self.aws_secret_access_key
+                else None
+            ),
+            aws_session_token=self.aws_session_token,
+            profile_name=self.profile_name,
+            region_name=self.region_name,
+        )
+
+```
+
+
+The `AWSCredentials` block can be used within an `S3Bucket` block to provide authentication when interacting with an S3 bucket:
+
+```
+import io
+
+class S3Bucket(Block):
+    bucket_name: str
+    credentials: AWSCredentials
+
+    def read(self, key: str) -> bytes:
+        s3_client = self.credentials.get_boto3_session().client("s3")
+
+        stream = io.BytesIO()
+        s3_client.download_fileobj(Bucket=self.bucket_name, Key=key, Fileobj=stream)
+
+        stream.seek(0)
+        output = stream.read()
+
+        return output
+
+    def write(self, key: str, data: bytes) -> None:
+        s3_client = self.credentials.get_boto3_session().client("s3")
+        stream = io.BytesIO(data)
+        s3_client.upload_fileobj(stream, Bucket=self.bucket_name, Key=key)
+
+```
+
+
+You can use this `S3Bucket` block with previously saved `AWSCredentials` block values in order to interact with the configured S3 bucket:
+
+```
+my_s3_bucket = S3Bucket(
+    bucket_name="my_s3_bucket",
+    credentials=AWSCredentials.load("my_aws_credentials")
+)
+
+my_s3_bucket.save("my_s3_bucket")
+
+```
+
+
+Saving block values like this links the values of the two blocks so that any changes to the values stored for the `AWSCredentials` block with the name `my_aws_credentials` will be seen the next time that block values for the `S3Bucket` block named `my_s3_bucket` are loaded.
+
+Values for nested blocks can also be hard-coded by not first saving child blocks:
+
+```
+my_s3_bucket = S3Bucket(
+    bucket_name="my_s3_bucket",
+    credentials=AWSCredentials(
+        aws_access_key_id="AKIAJKLJKLJKLJKLJKLJK",
+        aws_secret_access_key="secret_access_key"
+    )
+)
+
+my_s3_bucket.save("my_s3_bucket")
+
+```
+
+
+In the above example, the values for `AWSCredentials` are saved with `my_s3_bucket` and will not be usable with any other blocks.
+
+### Update custom `Block` types
+
+Let's add a `bucket_folder` field to your custom `S3Bucket` block that represents the default path to read and write objects from (this field exists on [our implementation](https://github.com/PrefectHQ/prefect-aws/blob/main/prefect_aws/s3.py#L292)).
+
+Add the new field to the class definition:
+
+```
+class S3Bucket(Block):
+    bucket_name: str
+    credentials: AWSCredentials
+    bucket_folder: str = None
+    ...
+
+```
+
+
+Then [register the updated block type](#register-blocks) with either Prefect Cloud or your self-hosted Prefect server instance.
+
+If you have any existing blocks of this type that were created before the update and you'd prefer not to re-create them, migrate them to the new version of your block type by adding the missing values:
+
+```
+# Bypass Pydantic validation to allow your local Block class to load the old block version
+my_s3_bucket_block = S3Bucket.load("my-s3-bucket", validate=False)
+
+# Set the new field to an appropriate value
+my_s3_bucket_block.bucket_folder = "my-default-bucket-path"
+
+# Overwrite the old block values and update the expected fields on the block
+my_s3_bucket_block.save("my-s3-bucket", overwrite=True)
+
+```
+
+
+Register blocks
+-----------------------------------------------------
+
+Prefect comes with many blocks pre-registered and ready to use. If the block type you need is not already available, you can register it.
+
+Blocks can be registered from a Python module available in the current environment with a CLI command like this:
+
+```
+prefect block register --module prefect_aws.credentials
+
+```
+
+
+This command is useful for registering all blocks found within a module in a [Prefect Integration library](https://docs.prefect.io/integrations/).
+
+Alternatively, if a custom block has been created in a `.py` file, the block can also be registered with the CLI command:
+
+```
+prefect block register --file my_block.py
+
+```
+
+
+The registered block will then be available for configuration.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/deployments.mdx b/docs/2.19.x/concepts/deployments.mdx
new file mode 100644
index 000000000000..2a621c46206c
--- /dev/null
+++ b/docs/2.19.x/concepts/deployments.mdx
@@ -0,0 +1,230 @@
+---
+title: Deployments
+---
+
+Deployments are server-side representations of flows. They store the crucial metadata needed for remote orchestration including _when_, _where_, and _how_ a workflow should run. Deployments elevate workflows from functions that you must call manually to API-managed entities that can be triggered remotely.
+
+Here we will focus largely on the metadata that defines a deployment and how it is used. Different ways of creating a deployment populate these fields differently.
+
+Overview
+---------------------------------------
+
+Every Prefect deployment references one and only one "entrypoint" flow (though that flow may itself call any number of subflows). Different deployments may reference the same underlying flow, a useful pattern when developing or promoting workflow changes through staged environments.
+
+The complete schema that defines a deployment is as follows:
+
+```python
+class Deployment:
+    """
+    Structure of the schema defining a deployment
+    """
+
+    # required defining data
+    name: str
+    flow_id: UUID
+    entrypoint: str
+    path: str = None
+
+    # workflow scheduling and parametrization
+    parameters: Optional[Dict[str, Any]] = None
+    parameter_openapi_schema: Optional[Dict[str, Any]] = None
+    schedules: list[Schedule] = None
+    paused: bool = False
+    trigger: Trigger = None
+
+    # metadata for bookkeeping
+    version: str = None
+    description: str = None
+    tags: list = None
+
+    # worker-specific fields
+    work_pool_name: str = None
+    work_queue_name: str = None
+    infra_overrides: Optional[Dict[str, Any]] = None
+    pull_steps: Optional[Dict[str, Any]] = None
+
+```
+
+
+All methods for creating Prefect deployments are interfaces for populating this schema. Let's look at each section in turn.
+
+### Required data
+
+Deployments universally require both a `name` and a reference to an underlying `Flow`.
In almost all instances of deployment creation, users do not need to concern themselves with the `flow_id`, as most interfaces will only need the flow's name. Note that the deployment name is not required to be unique across all deployments, but is required to be unique for a given flow ID. As a consequence, you will often see references to the deployment's unique identifying name `{FLOW_NAME}/{DEPLOYMENT_NAME}`. For example, triggering a run of a deployment from the Prefect CLI can be done via:
+
+```
+prefect deployment run my-first-flow/my-first-deployment
+
+```
+
+
+The other two fields are less obvious:
+
+* **`path`**: the _path_ can generally be interpreted as the runtime working directory for the flow. For example, if a deployment references a workflow defined within a Docker image, the `path` will be the absolute path to the parent directory where that workflow will run anytime the deployment is triggered. This interpretation is more subtle in the case of flows defined in remote filesystems.
+* **`entrypoint`**: the _entrypoint_ of a deployment is a relative reference to a function decorated as a flow that exists on some filesystem. It is always specified relative to the `path`. Entrypoints use Python's standard path-to-object syntax (e.g., `path/to/file.py:function_name` or simply `path:object`).
+
+The entrypoint must reference the same flow as the flow ID.
+
+Note that Prefect requires that deployments reference flows defined _within Python files_. Flows defined within interactive REPLs or notebooks cannot currently be deployed as such. They are still valid flows that will be monitored by the API and observable in the UI whenever they are run, but Prefect cannot trigger them.
+
+**Deployments do not contain code definitions**
+
+
+Deployment metadata references code that exists in potentially diverse locations within your environment. This separation of concerns means that your flow code stays within your storage and execution infrastructure and never lives on the Prefect server or database.
+
+This is the heart of the Prefect hybrid model: there's a boundary between your proprietary assets, such as your flow code, and the Prefect backend (including [Prefect Cloud](https://docs.prefect.io/cloud/)).
+
+### Scheduling and parametrization
+
+One of the primary motivations for creating deployments of flows is to remotely _schedule_ and _trigger_ them. Just as flows can be called as functions with different input values, so can deployments be triggered or scheduled with different values through the use of parameters.
+
+The following fields capture the necessary metadata to perform such actions:
+
+* **`schedules`**: a list of [schedule objects](https://docs.prefect.io/concepts/schedules/). Most of the convenient interfaces for creating deployments allow users to avoid creating this object themselves. For example, when [updating a deployment schedule in the UI](https://docs.prefect.io/concepts/schedules/#creating-schedules-through-the-ui) basic information such as a cron string or interval is all that's required.
+* **`trigger`** (Cloud-only): triggers allow you to define event-based rules for running a deployment. For more information see [Automations](https://docs.prefect.io/concepts/automations/).
+* **`parameter_openapi_schema`**: an [OpenAPI compatible schema](https://swagger.io/specification/) that defines the types and defaults for the flow's parameters. This is used by both the UI and the backend to expose options for creating manual runs as well as type validation.
+* **`parameters`**: default values of flow parameters that this deployment will pass on each run. These can be overwritten through a trigger or when manually creating a custom run.
+* **`enforce_parameter_schema`**: a boolean flag that determines whether the API should validate the parameters passed to a flow run against the schema defined by `parameter_openapi_schema`.
+
+
+**Scheduling is asynchronous and decoupled**
+
+Because deployments are nothing more than metadata, runs can be created at any time. Note that pausing a schedule, updating your deployment, and other actions reset your auto-scheduled runs.
+
+#### Running a deployed flow from within Python flow code
+
+Prefect provides a [`run_deployment` function](https://docs.prefect.io/api-ref/prefect/deployments/deployments/#prefect.deployments.deployments.run_deployment) that can be used to schedule the run of an existing deployment when your Python code executes.
+
+```python
+from prefect.deployments import run_deployment
+
+def main():
+    run_deployment(name="my_flow_name/my_deployment_name")
+
+```
+
+
+**Run a deployment without blocking**
+
+By default, `run_deployment` blocks until the scheduled flow run finishes executing. Pass `timeout=0` to return immediately and not block.
+
+
+If you call `run_deployment` from within a flow or task, the scheduled flow run will be linked to the calling flow run (or the calling task's flow run) as a subflow run by default.
+
+Subflow runs have different behavior than regular flow runs. For example, a subflow run can't be suspended independently of its parent flow. If you'd rather not link the scheduled flow run to the calling flow or task run, you can disable this behavior by passing `as_subflow=False`:
+
+```python
+from prefect import flow
+from prefect.deployments import run_deployment
+
+
+@flow
+def my_flow():
+    # The scheduled flow run will not be linked to this flow as a subflow.
+    run_deployment(name="my_other_flow/my_deployment_name", as_subflow=False)
+
+```
+
+
+The return value of `run_deployment` is a [FlowRun](https://docs.prefect.io/api-ref/prefect/client/schemas/#prefect.client.schemas.objects.FlowRun) object containing metadata about the scheduled run. You can use this object to retrieve information about the run after calling `run_deployment`:
+
+```python
+import asyncio
+
+from prefect import get_client
+from prefect.deployments import run_deployment
+
+async def main():
+    flow_run = await run_deployment(name="my_flow_name/my_deployment_name")
+    flow_run_id = flow_run.id
+
+    # If you save the flow run's ID, you can use it later to retrieve
+    # flow run metadata again, e.g. to check if it's completed.
+    async with get_client() as client:
+        flow_run = await client.read_flow_run(flow_run_id)
+        print(f"Current state of the flow run: {flow_run.state}")
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+
+**Using the Prefect client**
+
+For more information on using the Prefect client to interact with Prefect's REST API, see [our guide](https://docs.prefect.io/guides/using-the-client/).
+
+Versioning and bookkeeping
+---------------------------------------------------------------------------
+
+Versions, descriptions and tags are omnipresent fields throughout Prefect that can be easy to overlook. However, putting some extra thought into how you use these fields can pay dividends down the road.
+
+* **`version`**: versions are always set by the client and can be any arbitrary string. We recommend tightly coupling this field on your deployments to your software development lifecycle.
For example, if you leverage `git` to manage code changes, use either a tag or commit hash in this field. If you don't set a value for the version, Prefect will compute a hash.
+* **`description`**: the description field of a deployment is a place to provide rich reference material for downstream stakeholders such as intended use and parameter documentation. Markdown formatting will be rendered in the Prefect UI, allowing for section headers, links, tables, and other formatting. If not provided explicitly, Prefect will use the docstring of your flow function as a default value.
+* **`tags`**: tags are a mechanism for grouping related work together across a diverse set of objects. Tags set on a deployment will be inherited by that deployment's flow runs. These tags can then be used to filter what runs are displayed on the primary UI dashboard, allowing you to customize different views into your work. In addition, in Prefect Cloud you can easily find objects through searching by tag.
+
+All of these bits of metadata can be leveraged to great effect by injecting them into the processes that Prefect is orchestrating. For example, you can use both run ID and versions to organize files that you produce from your workflows, or by associating your flow run's tags with the metadata of a job it orchestrates. This metadata is available during execution through [Prefect runtime](https://docs.prefect.io/guides/runtime-context/).
+
+**Everything has a version**
+
+Deployments aren't the only entity in Prefect with a version attached; both flows and tasks also have versions that can be set through their respective decorators. These versions will be sent to the API anytime the flow or task is run and thereby allow you to audit your changes across all levels.
+
+### Workers and work pools
+
+[Workers and work pools](https://docs.prefect.io/concepts/work-pools/) are an advanced deployment pattern that allow you to dynamically provision infrastructure for each flow run. In addition, the work pool job template interface allows users to create and govern opinionated interfaces to their workflow infrastructure. To do this, a deployment using workers needs to set the following fields:
+
+* **`work_pool_name`**: the name of the work pool this deployment will be associated with. Work pool types mirror infrastructure types and therefore the decision here affects the options available for the other fields.
+* **`work_queue_name`**: if you are using work queues to either manage priority or concurrency, you can associate a deployment with a specific queue within a work pool using this field.
+* **`infra_overrides`**: often called `job_variables` within various interfaces, this field allows deployment authors to customize whatever infrastructure options have been exposed on this work pool. This field is often used for things such as Docker image names, Kubernetes annotations and limits, and environment variables.
+* **`pull_steps`**: a JSON description of steps that should be performed to retrieve flow code or configuration and prepare the runtime environment for workflow execution.
+
+Pull steps allow users to highly decouple their workflow architecture. For example, a common use of pull steps is to dynamically pull code from remote filesystems such as GitHub with each run of their deployment.
+
+For more information see [the guide to deploying with a worker](https://docs.prefect.io/guides/prefect-deploy/).
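+
+To make these worker-specific fields concrete, here is a minimal sketch using the `Flow.deploy` method. The work pool name, image tag, and environment variable below are illustrative assumptions, and the sketch presumes a Docker-type work pool has already been created:
+
+```python
+from prefect import flow
+
+
+@flow(log_prints=True)
+def etl():
+    print("extract, transform, load")
+
+
+if __name__ == "__main__":
+    etl.deploy(
+        name="etl-prod",                      # deployment name
+        work_pool_name="my-docker-pool",      # hypothetical, pre-created Docker work pool
+        work_queue_name="default",            # optional: route runs through a specific queue
+        image="my-registry/etl:latest",       # hypothetical image used to run the flow
+        job_variables={"env": {"ENV_VAR": "value"}},  # surfaced as infra_overrides on the API
+        push=False,                           # skip pushing the image in this sketch
+    )
+```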
+
+Two approaches to deployments
+---------------------------------------------------------------------------------
+
+There are two primary ways to deploy flows with Prefect, differentiated by how much control Prefect has over the infrastructure in which the flows run.
+
+In one setup, deploying Prefect flows is analogous to deploying a webserver: users author their workflows and then start a long-running process (often within a Docker container) that is responsible for managing all of the runs for the associated deployment(s).
+
+In the other setup, you do a little extra up-front work to set up a [work pool and a base job template that defines how individual flow runs will be submitted to infrastructure](https://docs.prefect.io/guides/prefect-deploy).
+
+Prefect provides several [types of work pools](https://docs.prefect.io/concepts/work-pools/#work-pool-types) corresponding to different types of infrastructure. Prefect Cloud provides a [Prefect Managed work pool](https://docs.prefect.io/guides/managed-execution/) option that is the simplest way to run workflows remotely. A cloud-provider account, such as AWS, is not required with a Prefect Managed work pool.
+
+Some work pool types require a client-side worker to submit job definitions to the appropriate infrastructure with each run.
+
+Each of these setups can support production workloads. The choice ultimately boils down to your use case and preferences. Read further to decide which setup is best for your situation.
+
+### Serving flows on long-lived infrastructure
+
+When you have several flows running regularly, [the `serve` method](https://docs.prefect.io/concepts/flows/#serving-a-flow) of the `Flow` object or [the `serve` utility](https://docs.prefect.io/concepts/flows/#serving-multiple-flows-at-once) is a great option for managing multiple flows simultaneously.
+
+Once you have authored your flow and decided on its deployment settings as described above, all that's left is to run this long-running process in a location of your choosing. The process will stay in communication with the Prefect API, monitoring for work and submitting each run within an individual subprocess. Note that because runs are submitted to subprocesses, any external infrastructure configuration will need to be set up beforehand and kept associated with this process.
+
+This approach has many benefits:
+
+* Users are in complete control of their infrastructure, and anywhere the "serve" Python process can run is a suitable deployment environment.
+* It is simple to reason about.
+* Creating deployments requires a minimal set of decisions.
+* Iteration speed is fast.
+
+However, there are a few reasons you might consider running flows on dynamically provisioned infrastructure with work pools instead:
+
+* Flows that have expensive infrastructure needs may be more costly in this setup due to the long-running process.
+* Flows with heterogeneous infrastructure needs across runs will be more difficult to configure and schedule.
+* Large volumes of deployments can be harder to track.
+* If your internal team structure requires that deployment authors be members of a different team than the team managing infrastructure, the work pool interface may be preferred.
+
+### Dynamically provisioning infrastructure with work pools
+
+[Work pools](https://docs.prefect.io/concepts/work-pools/) allow Prefect to exercise greater control of the infrastructure on which flows run.
Options for [serverless work pools](https://docs.prefect.io/guides/deployment/serverless-workers/) allow you to scale to zero when workflows aren't running. Prefect even provides you with the ability to [provision cloud infrastructure via a single CLI command](https://docs.prefect.io/guides/deployment/push-work-pools/#automatically-creating-a-new-push-work-pool-and-provisioning-infrastructure), if you use a Prefect Cloud push work pool option. + +With work pools: + +* You can configure and monitor infrastructure configuration within the Prefect UI. +* Infrastructure is ephemeral and dynamically provisioned. +* Prefect is more infrastructure-aware and therefore collects more event data from your infrastructure by default. +* Highly decoupled setups are possible. + + +**You don't have to commit to one approach** + +You are not required to use only one of these approaches for your deployments. You can mix and match approaches based on the needs of each flow. Further, you can change the deployment approach for a particular flow as its needs evolve. For example, you might use workers for your expensive machine learning pipelines, but use the serve mechanics for smaller, more frequent file-processing pipelines. + \ No newline at end of file diff --git a/docs/2.19.x/concepts/flows.mdx b/docs/2.19.x/concepts/flows.mdx new file mode 100644 index 000000000000..6b8a4e7ef006 --- /dev/null +++ b/docs/2.19.x/concepts/flows.mdx @@ -0,0 +1,1237 @@ +--- +title: Flows +description: Flows are the most central Prefect object. A flow is a container for workflow logic as-code and allows users to configure how their workflows behave. Flows are defined as Python functions, and any Python function is eligible to be a flow. +--- + +Flows overview +--------------------------------------------------- + +Flows can be thought of as special types of functions. They can take inputs, perform work, and return an output. In fact, you can turn any function into a Prefect flow by adding the `@flow` decorator. When a function becomes a flow, its behavior changes, giving it the following advantages: + +* Every invocation of this function is tracked and all state transitions are reported to the API, allowing observation of flow execution. +* Input arguments are automatically type checked and coerced to the appropriate types. +* Retries can be performed on failure. +* Timeouts can be enforced to prevent unintentional, long-running workflows. + +Flows also take advantage of automatic Prefect logging to capture details about [flow runs](#flow-runs) such as run time and final state. + +Flows can include calls to [tasks](https://docs.prefect.io/concepts/tasks/) as well as to other flows, which Prefect calls ["subflows"](#composing-flows) in this context. Flows may be defined within modules and imported for use as subflows in your flow definitions. + +[Deployments](https://docs.prefect.io/concepts/deployments/) elevate individual workflows from functions that you call manually to API-managed entities. + +Flow runs +----------------------------------------- + +A _flow run_ represents a single execution of the flow. + +You can create a flow run by calling the flow manually. For example, by running a Python script or importing the flow into an interactive session and calling it. + +You can also create a flow run by: + +* Using external schedulers such as `cron` to invoke a flow function +* Creating a [deployment](https://docs.prefect.io/concepts/deployments/) on Prefect Cloud or a locally run Prefect server. 
+* Creating a flow run for the deployment via a schedule, the Prefect UI, or the Prefect API.
+
+However you run the flow, the Prefect API monitors the flow run, capturing flow run state for observability.
+
+When you run a flow that contains tasks or additional flows, Prefect will track the relationship of each child run to the parent flow run.
+
+![Prefect UI](/images/flows1.png)
+
+Writing flows
+-------------------------------------------------
+
+The [`@flow`](https://docs.prefect.io/2.19.1/api-ref/prefect/flows/#prefect.flows.flow) decorator is used to designate a flow:
+
+```python
+from prefect import flow
+
+@flow
+def my_flow():
+    return
+
+```
+
+
+There are no rigid rules for what code you include within a flow definition: all valid Python is acceptable.
+
+Flows are uniquely identified by name. You can provide a `name` parameter value for the flow. If you don't provide a name, Prefect uses the flow function name.
+
+```python
+@flow(name="My Flow")
+def my_flow():
+    return
+
+```
+
+
+Flows can call tasks to allow Prefect to orchestrate and track more granular units of work:
+
+```python
+from prefect import flow, task
+
+@task
+def print_hello(name):
+    print(f"Hello {name}!")
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    print_hello(name)
+
+```
+
+
+**Flows and tasks**
+
+
+There's nothing stopping you from putting all of your code in a single flow function — Prefect will happily run it!
+
+However, organizing your workflow code into smaller flow and task units lets you take advantage of Prefect features like retries, more granular visibility into runtime state, the ability to determine final state regardless of individual task state, and more.
+
+In addition, if you put all of your workflow logic in a single flow function and any line of code fails, the entire flow will fail and must be retried from the beginning. This can be avoided by breaking up the code into multiple tasks.
+
+You may call any number of other tasks, subflows, and even regular Python functions within your flow. You can pass parameters to your flow function that will be used elsewhere in the workflow, and Prefect will report on the progress and [final state](#final-state-determination) of any invocation.
+
+Prefect encourages "small tasks" — each one should represent a single logical step of your workflow. This allows Prefect to better contain task failures.
+
+
+Flow settings
+-------------------------------------------------
+
+Flows allow a great deal of configuration by passing arguments to the decorator. Flows accept the following optional settings.
+
+
+| Argument              | Description |
+|-----------------------|-------------|
+| `description`         | An optional string description for the flow. If not provided, the description will be pulled from the docstring for the decorated function. |
+| `name`                | An optional name for the flow. If not provided, the name will be inferred from the function. |
+| `retries`             | An optional number of times to retry on flow run failure. |
+| `retry_delay_seconds` | An optional number of seconds to wait before retrying the flow after failure. This is only applicable if `retries` is nonzero. |
+| `flow_run_name`       | An optional name to distinguish runs of this flow. The name can be provided as a string template with the flow's parameters as variables, or as a function that returns a string. |
+| `task_runner`         | An optional [task runner](/2.19.1/concepts/task-runners) to use for task execution within the flow when you `.submit()` tasks. If not provided and you `.submit()` tasks, the `ConcurrentTaskRunner` will be used. |
+| `timeout_seconds`     | An optional number of seconds indicating a maximum runtime for the flow. If the flow exceeds this runtime, it will be marked as failed. Flow execution may continue until the next task is called. |
+| `validate_parameters` | Boolean indicating whether parameters passed to flows are validated by Pydantic. Default is `True`. |
+| `version`             | An optional version string for the flow. If not provided, we will attempt to create a version string as a hash of the file containing the wrapped function. If the file cannot be located, the version will be null. |
+
+
+For example, you can provide a `name` value for the flow. Here we've also used the optional `description` argument and specified a non-default task runner.
+
+```python
+from prefect import flow
+from prefect.task_runners import SequentialTaskRunner
+
+@flow(name="My Flow",
+      description="My flow using SequentialTaskRunner",
+      task_runner=SequentialTaskRunner())
+def my_flow():
+    return
+
+```
+
+
+You can also provide the description as the docstring on the flow function.
+
+```python
+@flow(name="My Flow",
+      task_runner=SequentialTaskRunner())
+def my_flow():
+    """My flow using SequentialTaskRunner"""
+    return
+
+```
+
+
+You can distinguish runs of this flow by providing a `flow_run_name`. This setting accepts a string that can optionally contain templated references to the parameters of your flow. The name will be formatted using Python's standard string formatting syntax, as shown here:
+
+```python
+import datetime
+from prefect import flow
+
+@flow(flow_run_name="{name}-on-{date:%A}")
+def my_flow(name: str, date: datetime.datetime):
+    pass
+
+# creates a flow run called 'marvin-on-Thursday'
+my_flow(name="marvin", date=datetime.datetime.now(datetime.timezone.utc))
+
+```
+
+
+This setting also accepts a function that returns a string for the flow run name:
+
+```python
+import datetime
+from prefect import flow
+
+def generate_flow_run_name():
+    date = datetime.datetime.now(datetime.timezone.utc)
+
+    return f"{date:%A}-is-a-nice-day"
+
+@flow(flow_run_name=generate_flow_run_name)
+def my_flow(name: str):
+    pass
+
+# creates a flow run called 'Thursday-is-a-nice-day'
+if __name__ == "__main__":
+    my_flow(name="marvin")
+
+```
+
+
+If you need access to information about the flow, use the `prefect.runtime` module. For example:
+
+```python
+from prefect import flow
+from prefect.runtime import flow_run
+
+def generate_flow_run_name():
+    flow_name = flow_run.flow_name
+
+    parameters = flow_run.parameters
+    name = parameters["name"]
+    limit = parameters["limit"]
+
+    return f"{flow_name}-with-{name}-and-{limit}"
+
+@flow(flow_run_name=generate_flow_run_name)
+def my_flow(name: str, limit: int = 100):
+    pass
+
+# creates a flow run called 'my-flow-with-marvin-and-100'
+if __name__ == "__main__":
+    my_flow(name="marvin")
+
+```
+
+
+Note that `validate_parameters` will check that input values conform to the annotated types on the function. Where possible, values will be coerced into the correct type.
For example, if a parameter is defined as `x: int` and "5" is passed, it will be resolved to `5`. If set to `False`, no validation will be performed on flow parameters.
+
+Separating logic into tasks
+-----------------------------------------------------------------------------
+
+The simplest workflow is just a `@flow` function that does all the work of the workflow.
+
+```python
+from prefect import flow
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    print(f"Hello {name}!")
+
+if __name__ == "__main__":
+    hello_world("Marvin")
+
+```
+
+
+When you run this flow, you'll see output like the following:
+
+```
+$ python hello.py
+15:11:23.594 | INFO | prefect.engine - Created flow run 'benevolent-donkey' for flow 'hello-world'
+15:11:23.594 | INFO | Flow run 'benevolent-donkey' - Using task runner 'ConcurrentTaskRunner'
+Hello Marvin!
+15:11:24.447 | INFO | Flow run 'benevolent-donkey' - Finished in state Completed()
+
+```
+
+
+A better practice is to create `@task` functions that do the specific work of your flow, and use your `@flow` function as the conductor that orchestrates the flow of your application:
+
+```python
+from prefect import flow, task
+
+@task(name="Print Hello")
+def print_hello(name):
+    msg = f"Hello {name}!"
+    print(msg)
+    return msg
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    message = print_hello(name)
+
+if __name__ == "__main__":
+    hello_world("Marvin")
+
+```
+
+
+When you run this flow, you'll see the following output, which illustrates how the work is encapsulated in a task run.
+
+```
+$ python hello.py
+15:15:58.673 | INFO | prefect.engine - Created flow run 'loose-wolverine' for flow 'Hello Flow'
+15:15:58.674 | INFO | Flow run 'loose-wolverine' - Using task runner 'ConcurrentTaskRunner'
+15:15:58.973 | INFO | Flow run 'loose-wolverine' - Created task run 'Print Hello-84f0fe0e-0' for task 'Print Hello'
+Hello Marvin!
+15:15:59.037 | INFO | Task run 'Print Hello-84f0fe0e-0' - Finished in state Completed()
+15:15:59.568 | INFO | Flow run 'loose-wolverine' - Finished in state Completed('All states completed.')
+
+```
+
+
+Visualizing flow structure
+---------------------------------------------------------------------------
+
+You can get a quick sense of the structure of your flow using the `.visualize()` method on your flow. Calling this method will attempt to produce a schematic diagram of your flow and tasks without actually running your flow code.
+
+
+**Functions and code not inside of flows or tasks will still be run when calling `.visualize()`. This may have unintended consequences. Place your code into tasks to avoid unintended execution.**
+
+
+
+**To use the `visualize()` method, Graphviz must be installed and on your PATH. Please install Graphviz from [http://www.graphviz.org/download/](http://www.graphviz.org/download/). And note: just installing the `graphviz` Python package is not sufficient.**
+
+
+```python
+from prefect import flow, task
+
+@task(name="Print Hello")
+def print_hello(name):
+    msg = f"Hello {name}!"
+    print(msg)
+    return msg
+
+@task(name="Print Hello Again")
+def print_hello_again(name):
+    msg = f"Hello {name}!"
+    print(msg)
+    return msg
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    message = print_hello(name)
+    message2 = print_hello_again(message)
+
+if __name__ == "__main__":
+    hello_world.visualize()
+
+```
+
+
+![A simple flow visualized with the .visualize() method](/images/flows2.png)
+
+Prefect cannot automatically produce a schematic for dynamic workflows, such as those with loops or if/else control flow. In this case, you can provide tasks with mock return values for use in the `visualize()` call.
+
+```python
+from prefect import flow, task
+@task(viz_return_value=[4])
+def get_list():
+    return [1, 2, 3]
+
+@task
+def append_one(n):
+    return n.append(6)
+
+@flow
+def viz_return_value_tracked():
+    l = get_list()
+    for num in range(3):
+        l.append(5)
+    append_one(l)
+
+if __name__ == "__main__":
+    viz_return_value_tracked.visualize()
+
+```
+
+
+![A flow with return values visualized with the .visualize() method](/images/flows3.png)
+
+Composing flows
+-----------------------------------------------------
+
+A _subflow_ run is created when a flow function is called inside the execution of another flow. The primary flow is the "parent" flow. The flow created within the parent is the "child" flow or "subflow."
+
+Subflow runs behave like normal flow runs. There is a full representation of the flow run in the backend as if it had been called separately. When a subflow starts, it will create a new [task runner](https://docs.prefect.io/concepts/task-runners/) for tasks within the subflow. When the subflow completes, the task runner is shut down.
+
+Subflows will block execution of the parent flow until completion. However, asynchronous subflows can be run concurrently by using [AnyIO task groups](https://anyio.readthedocs.io/en/stable/tasks.html) or [asyncio.gather](https://docs.python.org/3/library/asyncio-task.html#id6).
+
+Subflows differ from normal flows in that they will resolve any passed task futures into data. This allows data to be passed from the parent flow to the child easily.
+
+The relationship between a child and parent flow is tracked by creating a special task run in the parent flow. This task run will mirror the state of the child flow run.
+
+A task that represents a subflow will be annotated as such in its `state_details` via the presence of a `child_flow_run_id` field. A subflow can be identified via the presence of a `parent_task_run_id` on `state_details`.
+
+You can define multiple flows within the same file. Whether running locally or via a [deployment](https://docs.prefect.io/concepts/deployments/), you must indicate which flow is the entrypoint for a flow run.
+
+**Cancelling subflow runs**
+
+Inline subflow runs, specifically those created without `run_deployment`, cannot be cancelled without cancelling their parent flow run. If you need to be able to cancel a subflow run independently of its parent flow run, we recommend deploying it separately and starting it using the [run\_deployment](https://docs.prefect.io/api-ref/prefect/deployments/deployments/#prefect.deployments.deployments.run_deployment) function.
+
+
+```python
+from prefect import flow, task
+
+@task(name="Print Hello")
+def print_hello(name):
+    msg = f"Hello {name}!"
+    print(msg)
+    return msg
+
+@flow(name="Subflow")
+def my_subflow(msg):
+    print(f"Subflow says: {msg}")
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    message = print_hello(name)
+    my_subflow(message)
+
+if __name__ == "__main__":
+    hello_world("Marvin")
+
+```
+
+
+You can also define flows or tasks in separate modules and import them for usage. For example, here's a simple subflow module:
+
+```python
+from prefect import flow
+
+@flow(name="Subflow")
+def my_subflow(msg):
+    print(f"Subflow says: {msg}")
+
+```
+
+
+Here's a parent flow that imports and uses `my_subflow()` as a subflow:
+
+```python
+from prefect import flow, task
+from subflow import my_subflow
+
+@task(name="Print Hello")
+def print_hello(name):
+    msg = f"Hello {name}!"
+    print(msg)
+    return msg
+
+@flow(name="Hello Flow")
+def hello_world(name="world"):
+    message = print_hello(name)
+    my_subflow(message)
+
+hello_world("Marvin")
+
+```
+
+
+Running the `hello_world()` flow (in this example from the file `hello.py`) creates a flow run like this:
+
+```
+$ python hello.py
+15:19:21.651 | INFO | prefect.engine - Created flow run 'daft-cougar' for flow 'Hello Flow'
+15:19:21.651 | INFO | Flow run 'daft-cougar' - Using task runner 'ConcurrentTaskRunner'
+15:19:21.945 | INFO | Flow run 'daft-cougar' - Created task run 'Print Hello-84f0fe0e-0' for task 'Print Hello'
+Hello Marvin!
+15:19:22.055 | INFO | Task run 'Print Hello-84f0fe0e-0' - Finished in state Completed()
+15:19:22.107 | INFO | Flow run 'daft-cougar' - Created subflow run 'ninja-duck' for flow 'Subflow'
+Subflow says: Hello Marvin!
+15:19:22.794 | INFO | Flow run 'ninja-duck' - Finished in state Completed()
+15:19:23.215 | INFO | Flow run 'daft-cougar' - Finished in state Completed('All states completed.')
+
+```
+
+
+**Subflows or tasks?**
+
+In Prefect, you can call tasks _or_ subflows to do work within your workflow, including passing results from other tasks to your subflow. So a common question is:
+
+"When should I use a subflow instead of a task?"
+
+We recommend writing tasks that do a discrete, specific piece of work in your workflow: calling an API, performing a database operation, analyzing or transforming a data point. Prefect tasks are well suited to parallel or distributed execution using distributed computation frameworks such as Dask or Ray. For troubleshooting, the more granular your tasks are, the easier it is to find and fix issues if a task fails.
+
+Subflows enable you to group related tasks within your workflow. Here are some scenarios where you might choose to use a subflow rather than calling tasks individually:
+
+* Observability: Subflows, like any other flow run, have first-class observability within the Prefect UI and Prefect Cloud. You'll see subflow status in the **Flow Runs** dashboard rather than having to dig down into the tasks within a specific flow run. See [Final state determination](#final-state-determination) for some examples of leveraging task state within flows.
+* Conditional flows: If you have a group of tasks that run only under certain conditions, you can group them within a subflow and conditionally run the subflow rather than each task individually.
+* Parameters: Flows have first-class support for parameterization, making it easy to run the same group of tasks in different use cases by simply passing different parameters to the subflow in which they run.
+* Task runners: Subflows enable you to specify the task runner used for tasks within the flow.
For example, if you want to optimize parallel execution of certain tasks with Dask, you can group them in a subflow that uses the Dask task runner. You can use a different task runner for each subflow.
+
+
+Parameters
+-------------------------------------------
+
+Flows can be called with both positional and keyword arguments. These arguments are resolved at runtime into a dictionary of **parameters** mapping name to value. These parameters are stored by the Prefect orchestration engine on the flow run object.
+
+ **Prefect API requires keyword arguments**
+
+When creating flow runs from the Prefect API, parameter names must be specified when overriding defaults — they cannot be positional.
+
+
+
+Type hints provide an easy way to enforce typing on your flow parameters via [pydantic](https://pydantic-docs.helpmanual.io/). This means _any_ pydantic model used as a type hint within a flow will be coerced automatically into the relevant object type:
+
+```python
+from prefect import flow
+from pydantic import BaseModel
+
+class Model(BaseModel):
+    a: int
+    b: float
+    c: str
+
+@flow
+def model_validator(model: Model):
+    print(model)
+
+```
+
+
+Note that parameter values can be provided to a flow via API using a [deployment](https://docs.prefect.io/concepts/deployments/). Flow run parameters sent to the API on flow calls are coerced to a serializable form. Type hints on your flow functions provide you a way of automatically coercing JSON-provided values to their appropriate Python representation.
+
+For example, to automatically convert something to a datetime:
+
+```python
+from prefect import flow
+from datetime import datetime, timezone
+
+@flow
+def what_day_is_it(date: datetime = None):
+    if date is None:
+        date = datetime.now(timezone.utc)
+    print(f"It was {date.strftime('%A')} on {date.isoformat()}")
+
+if __name__ == "__main__":
+    what_day_is_it("2021-01-01T02:00:19.180906")
+
+```
+
+
+When you run this flow, you'll see the following output:
+
+```
+It was Friday on 2021-01-01T02:00:19.180906
+
+```
+
+
+Parameters are validated before a flow is run. If a flow call receives invalid parameters, a flow run is created in a `Failed` state. If a flow run for a deployment receives invalid parameters, it will move from a `Pending` state to a `Failed` state without entering a `Running` state.
+
+ **Flow run parameters cannot exceed `512kb` in size**
+
+
+
+Final state determination
+-------------------------------------------------------------------------
+
+**Prerequisite**
+
+Read the documentation about [states](https://docs.prefect.io/concepts/states) before proceeding with this section.
+
+The final state of the flow is determined by its return value. The following rules apply:
+
+* If an exception is raised directly in the flow function, the flow run is marked as failed.
+* If the flow does not return a value (or returns `None`), its state is determined by the states of all of the tasks and subflows within it.
+* If _any_ task run or subflow run failed, then the final flow run state is marked as `FAILED`.
+* If _any_ task run was cancelled, then the final flow run state is marked as `CANCELLED`.
+* If a flow returns a manually created state, it is used as the state of the final flow run. This allows for manual determination of final state.
+* If the flow run returns _any other object_, then it is marked as completed.
+
+The following examples illustrate each of these cases:
+
+### Raise an exception
+
+If an exception is raised within the flow function, the flow is immediately marked as failed.
+
+```python
+from prefect import flow
+
+@flow
+def always_fails_flow():
+    raise ValueError("This flow immediately fails")
+
+if __name__ == "__main__":
+    always_fails_flow()
+
+```
+
+
+Running this flow produces the following result:
+
+```
+22:22:36.864 | INFO | prefect.engine - Created flow run 'acrid-tuatara' for flow 'always-fails-flow'
+22:22:36.864 | INFO | Flow run 'acrid-tuatara' - Starting 'ConcurrentTaskRunner'; submitted tasks will be run concurrently...
+22:22:37.060 | ERROR | Flow run 'acrid-tuatara' - Encountered exception during execution:
+Traceback (most recent call last):...
+ValueError: This flow immediately fails
+
+```
+
+
+### Return `None`
+
+The final state of a flow with no return statement is determined by the states of all of its task runs.
+
+```python
+from prefect import flow, task
+
+@task
+def always_fails_task():
+    raise ValueError("I fail successfully")
+
+@task
+def always_succeeds_task():
+    print("I'm fail safe!")
+    return "success"
+
+@flow
+def always_fails_flow():
+    always_fails_task.submit().result(raise_on_failure=False)
+    always_succeeds_task()
+
+if __name__ == "__main__":
+    always_fails_flow()
+
+```
+
+
+Running this flow produces the following result:
+
+```
+18:32:05.345 | INFO | prefect.engine - Created flow run 'auburn-lionfish' for flow 'always-fails-flow'
+18:32:05.346 | INFO | Flow run 'auburn-lionfish' - Starting 'ConcurrentTaskRunner'; submitted tasks will be run concurrently...
+18:32:05.582 | INFO | Flow run 'auburn-lionfish' - Created task run 'always_fails_task-96e4be14-0' for task 'always_fails_task'
+18:32:05.582 | INFO | Flow run 'auburn-lionfish' - Submitted task run 'always_fails_task-96e4be14-0' for execution.
+18:32:05.610 | ERROR | Task run 'always_fails_task-96e4be14-0' - Encountered exception during execution:
+Traceback (most recent call last):
+  ...
+ValueError: I fail successfully
+18:32:05.638 | ERROR | Task run 'always_fails_task-96e4be14-0' - Finished in state Failed('Task run encountered an exception.')
+18:32:05.658 | INFO | Flow run 'auburn-lionfish' - Created task run 'always_succeeds_task-9c27db32-0' for task 'always_succeeds_task'
+18:32:05.659 | INFO | Flow run 'auburn-lionfish' - Executing 'always_succeeds_task-9c27db32-0' immediately...
+I'm fail safe!
+18:32:05.703 | INFO | Task run 'always_succeeds_task-9c27db32-0' - Finished in state Completed()
+18:32:05.730 | ERROR | Flow run 'auburn-lionfish' - Finished in state Failed('1/2 states failed.')
+Traceback (most recent call last):
+  ...
+ValueError: I fail successfully
+
+```
+
+
+### Return a future
+
+If a flow returns one or more futures, the final state is determined based on the underlying states.
+
+```python
+from prefect import flow, task
+
+@task
+def always_fails_task():
+    raise ValueError("I fail successfully")
+
+@task
+def always_succeeds_task():
+    print("I'm fail safe!")
+    return "success"
+
+@flow
+def always_succeeds_flow():
+    x = always_fails_task.submit().result(raise_on_failure=False)
+    y = always_succeeds_task.submit(wait_for=[x])
+    return y
+
+if __name__ == "__main__":
+    always_succeeds_flow()
+
+```
+
+
+Running this flow produces the following result — it succeeds because it returns the future of the task that succeeds:
+
+```
+18:35:24.965 | INFO | prefect.engine - Created flow run 'whispering-guan' for flow 'always-succeeds-flow'
+18:35:24.965 | INFO | Flow run 'whispering-guan' - Starting 'ConcurrentTaskRunner'; submitted tasks will be run concurrently...
+18:35:25.204 | INFO | Flow run 'whispering-guan' - Created task run 'always_fails_task-96e4be14-0' for task 'always_fails_task' +18:35:25.205 | INFO | Flow run 'whispering-guan' - Submitted task run 'always_fails_task-96e4be14-0' for execution. +18:35:25.232 | ERROR | Task run 'always_fails_task-96e4be14-0' - Encountered exception during execution: +Traceback (most recent call last): + ... +ValueError: I fail successfully +18:35:25.265 | ERROR | Task run 'always_fails_task-96e4be14-0' - Finished in state Failed('Task run encountered an exception.') +18:35:25.289 | INFO | Flow run 'whispering-guan' - Created task run 'always_succeeds_task-9c27db32-0' for task 'always_succeeds_task' +18:35:25.289 | INFO | Flow run 'whispering-guan' - Submitted task run 'always_succeeds_task-9c27db32-0' for execution. +I'm fail safe! +18:35:25.335 | INFO | Task run 'always_succeeds_task-9c27db32-0' - Finished in state Completed() +18:35:25.362 | INFO | Flow run 'whispering-guan' - Finished in state Completed('All states completed.') + +``` + + +### Return multiple states or futures + +If a flow returns a mix of futures and states, the final state is determined by resolving all futures to states, then determining if any of the states are not `COMPLETED`. + +```python +from prefect import task, flow + +@task +def always_fails_task(): + raise ValueError("I am bad task") + +@task +def always_succeeds_task(): + return "foo" + +@flow +def always_succeeds_flow(): + return "bar" + +@flow +def always_fails_flow(): + x = always_fails_task() + y = always_succeeds_task() + z = always_succeeds_flow() + return x, y, z + +``` + + +Running this flow produces the following result. It fails because one of the three returned futures failed. Note that the final state is `Failed`, but the states of each of the returned futures is included in the flow state: + +``` +20:57:51.547 | INFO | prefect.engine - Created flow run 'impartial-gorilla' for flow 'always-fails-flow' +20:57:51.548 | INFO | Flow run 'impartial-gorilla' - Using task runner 'ConcurrentTaskRunner' +20:57:51.645 | INFO | Flow run 'impartial-gorilla' - Created task run 'always_fails_task-58ea43a6-0' for task 'always_fails_task' +20:57:51.686 | INFO | Flow run 'impartial-gorilla' - Created task run 'always_succeeds_task-c9014725-0' for task 'always_succeeds_task' +20:57:51.727 | ERROR | Task run 'always_fails_task-58ea43a6-0' - Encountered exception during execution: +Traceback (most recent call last):... 
+ValueError: I am bad task +20:57:51.787 | INFO | Task run 'always_succeeds_task-c9014725-0' - Finished in state Completed() +20:57:51.808 | INFO | Flow run 'impartial-gorilla' - Created subflow run 'unbiased-firefly' for flow 'always-succeeds-flow' +20:57:51.884 | ERROR | Task run 'always_fails_task-58ea43a6-0' - Finished in state Failed('Task run encountered an exception.') +20:57:52.438 | INFO | Flow run 'unbiased-firefly' - Finished in state Completed() +20:57:52.811 | ERROR | Flow run 'impartial-gorilla' - Finished in state Failed('1/3 states failed.') +Failed(message='1/3 states failed.', type=FAILED, result=(Failed(message='Task run encountered an exception.', type=FAILED, result=ValueError('I am bad task'), task_run_id=5fd4c697-7c4c-440d-8ebc-dd9c5bbf2245), Completed(message=None, type=COMPLETED, result='foo', task_run_id=df9b6256-f8ac-457c-ba69-0638ac9b9367), Completed(message=None, type=COMPLETED, result='bar', task_run_id=cfdbf4f1-dccd-4816-8d0f-128750017d0c)), flow_run_id=6d2ec094-001a-4cb0-a24e-d2051db6318d) + +``` + + +**Returning multiple states** + +When returning multiple states, they must be contained in a `set`, `list`, or `tuple`. If other collection types are used, the result of the contained states will not be checked. + +### Return a manual state + +If a flow returns a manually created state, the final state is determined based on the return value. + +```python +from prefect import task, flow +from prefect.states import Completed, Failed + +@task +def always_fails_task(): + raise ValueError("I fail successfully") + +@task +def always_succeeds_task(): + print("I'm fail safe!") + return "success" + +@flow +def always_succeeds_flow(): + x = always_fails_task.submit() + y = always_succeeds_task.submit() + if y.result() == "success": + return Completed(message="I am happy with this result") + else: + return Failed(message="How did this happen!?") + +if __name__ == "__main__": + always_succeeds_flow() + +``` + + +Running this flow produces the following result. + +``` +18:37:42.844 | INFO | prefect.engine - Created flow run 'lavender-elk' for flow 'always-succeeds-flow' +18:37:42.845 | INFO | Flow run 'lavender-elk' - Starting 'ConcurrentTaskRunner'; submitted tasks will be run concurrently... +18:37:43.125 | INFO | Flow run 'lavender-elk' - Created task run 'always_fails_task-96e4be14-0' for task 'always_fails_task' +18:37:43.126 | INFO | Flow run 'lavender-elk' - Submitted task run 'always_fails_task-96e4be14-0' for execution. +18:37:43.162 | INFO | Flow run 'lavender-elk' - Created task run 'always_succeeds_task-9c27db32-0' for task 'always_succeeds_task' +18:37:43.163 | INFO | Flow run 'lavender-elk' - Submitted task run 'always_succeeds_task-9c27db32-0' for execution. +18:37:43.175 | ERROR | Task run 'always_fails_task-96e4be14-0' - Encountered exception during execution: +Traceback (most recent call last): + ... +ValueError: I fail successfully +I'm fail safe! +18:37:43.217 | ERROR | Task run 'always_fails_task-96e4be14-0' - Finished in state Failed('Task run encountered an exception.') +18:37:43.236 | INFO | Task run 'always_succeeds_task-9c27db32-0' - Finished in state Completed() +18:37:43.264 | INFO | Flow run 'lavender-elk' - Finished in state Completed('I am happy with this result') + +``` + + +### Return an object + +If the flow run returns _any other object_, then it is marked as completed. 
+
+```python
+from prefect import task, flow
+
+@task
+def always_fails_task():
+    raise ValueError("I fail successfully")
+
+@flow
+def always_succeeds_flow():
+    always_fails_task.submit()
+    return "foo"
+
+if __name__ == "__main__":
+    always_succeeds_flow()
+
+```
+
+
+Running this flow produces the following result.
+
+```
+21:02:45.715 | INFO | prefect.engine - Created flow run 'sparkling-pony' for flow 'always-succeeds-flow'
+21:02:45.715 | INFO | Flow run 'sparkling-pony' - Using task runner 'ConcurrentTaskRunner'
+21:02:45.816 | INFO | Flow run 'sparkling-pony' - Created task run 'always_fails_task-58ea43a6-0' for task 'always_fails_task'
+21:02:45.853 | ERROR | Task run 'always_fails_task-58ea43a6-0' - Encountered exception during execution:
+Traceback (most recent call last):...
+ValueError: I fail successfully
+21:02:45.879 | ERROR | Task run 'always_fails_task-58ea43a6-0' - Finished in state Failed('Task run encountered an exception.')
+21:02:46.593 | INFO | Flow run 'sparkling-pony' - Finished in state Completed()
+Completed(message=None, type=COMPLETED, result='foo', flow_run_id=7240e6f5-f0a8-4e00-9440-a7b33fb51153)
+
+```
+
+
+Serving a flow
+---------------------------------------------------
+
+The simplest way to create a [deployment](https://docs.prefect.io/concepts/deployments/) for your flow is by calling its [`serve` method](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.Flow.serve). This method creates a deployment for the flow and starts a long-running process that monitors for work from the Prefect server. When work is found, it is executed within its own isolated subprocess.
+
+
+```python hello_world.py
+
+from prefect import flow
+
+
+@flow(log_prints=True)
+def hello_world(name: str = "world", goodbye: bool = False):
+    print(f"Hello {name} from Prefect! 🤗")
+
+    if goodbye:
+        print(f"Goodbye {name}!")
+
+
+if __name__ == "__main__":
+    # creates a deployment and stays running to monitor for work instructions generated on the server
+
+    hello_world.serve(name="my-first-deployment",
+                      tags=["onboarding"],
+                      parameters={"goodbye": True},
+                      interval=60)
+
+```
+
+
+This interface provides all of the configuration needed for a deployment with no strong infrastructure requirements:
+
+* schedules
+* event triggers
+* metadata such as tags and description
+* default parameter values
+
+**Schedules are auto-paused on shutdown**
+
+
+By default, stopping the process running `flow.serve` will pause the schedule for the deployment (if it has one). When running this in environments where restarts are expected, use the `pause_on_shutdown=False` flag to prevent this behavior:
+
+```python
+if __name__ == "__main__":
+    hello_world.serve(name="my-first-deployment",
+                      tags=["onboarding"],
+                      parameters={"goodbye": True},
+                      pause_on_shutdown=False,
+                      interval=60)
+
+```
+
+
+### Serving multiple flows at once
+
+You can take this further and serve multiple flows with the same process using the [`serve`](https://docs.prefect.io/api-ref/prefect/runner/#prefect.runner.serve) utility along with the `to_deployment` method of flows:
+
+```python
+import time
+from prefect import flow, serve
+
+
+@flow
+def slow_flow(sleep: int = 60):
+    "Sleepy flow - sleeps the provided amount of time (in seconds)."
+    time.sleep(sleep)
+
+
+@flow
+def fast_flow():
+    "Fastest flow this side of the Mississippi."
+    return
+
+
+if __name__ == "__main__":
+    slow_deploy = slow_flow.to_deployment(name="sleeper", interval=45)
+    fast_deploy = fast_flow.to_deployment(name="fast")
+    serve(slow_deploy, fast_deploy)
+
+```
+
+
+The behavior and interfaces are identical to the single flow case.
+
+Retrieve a flow from remote storage
+---------------------------------------------------------------------------------------------
+
+Flows can be retrieved from remote storage using the [`flow.from_source`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.Flow.from_source) method.
+
+`flow.from_source` accepts a git repository URL and an entrypoint pointing to the flow to load from the repository:
+
+
+
+```python load_from_url.py
+from prefect import flow
+
+my_flow = flow.from_source(
+    source="https://github.com/PrefectHQ/prefect.git",
+    entrypoint="flows/hello_world.py:hello"
+)
+
+if __name__ == "__main__":
+    my_flow()
+
+```
+
+
+```
+16:40:33.818 | INFO | prefect.engine - Created flow run 'muscular-perch' for flow 'hello'
+16:40:34.048 | INFO | Flow run 'muscular-perch' - Hello world!
+16:40:34.706 | INFO | Flow run 'muscular-perch' - Finished in state Completed()
+
+```
+
+
+A flow entrypoint is the path to the file the flow is located in, and the name of the flow function, separated by a colon.
+
+If you need additional configuration, such as specifying a private repository, you can provide a [`GitRepository`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.runner.storage.GitRepository) instead of a URL:
+
+
+
+```python load_from_storage.py
+from prefect import flow
+from prefect.runner.storage import GitRepository
+from prefect.blocks.system import Secret
+
+my_flow = flow.from_source(
+    source=GitRepository(
+        url="https://github.com/org/private-repo.git",
+        branch="dev",
+        credentials={
+            "access_token": Secret.load("github-access-token").get()
+        }
+    ),
+    entrypoint="flows.py:my_flow"
+)
+
+if __name__ == "__main__":
+    my_flow()
+
+```
+
+
+**You can serve loaded flows**
+
+Flows loaded from remote storage can be served using the same [`serve`](#serving-a-flow) method as local flows:
+
+
+
+```python serve_loaded_flow.py
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/org/repo.git",
+        entrypoint="flows.py:my_flow"
+    ).serve(name="my-deployment")
+
+```
+
+When you serve a flow loaded from remote storage, the serving process will periodically poll your remote storage for updates to the flow's code. This pattern allows you to update your flow code without restarting the serving process.
+
+
+
+
+Pausing or suspending a flow run
+---------------------------------------------------------------------------------------
+
+Prefect provides you with the ability to halt a flow run with two functions that are similar, but slightly different. When a flow run is paused, code execution is stopped and the process continues to run. When a flow run is suspended, code execution is stopped and so is the process.
+
+### Pausing a flow run
+
+Prefect enables pausing an in-progress flow run for manual approval. Prefect exposes this functionality via the [`pause_flow_run`](https://docs.prefect.io/api-ref/prefect/engine/#prefect.engine.pause_flow_run) and [`resume_flow_run`](https://docs.prefect.io/api-ref/prefect/engine/#prefect.engine.resume_flow_run) functions.
+
+**Timeouts**
+
+Paused flow runs time out after one hour by default. After the timeout, the flow run will fail with a message saying it paused and never resumed.
You can specify a different timeout period in seconds using the `timeout` parameter.
+
+
+Most simply, `pause_flow_run` can be called inside a flow:
+
+```python
+from prefect import task, flow, pause_flow_run, resume_flow_run
+
+@task
+async def marvin_setup():
+    return "a raft of ducks walk into a bar..."
+
+
+@task
+async def marvin_punchline():
+    return "it's a wonder none of them ducked!"
+
+
+@flow
+async def inspiring_joke():
+    await marvin_setup()
+    await pause_flow_run(timeout=600)  # pauses for 10 minutes
+    await marvin_punchline()
+
+```
+
+
+You can also implement conditional pauses:
+
+```python
+from time import sleep
+
+from prefect import task, flow, pause_flow_run
+from prefect.client.schemas.objects import StateType
+
+@task
+def task_one():
+    for i in range(3):
+        sleep(1)
+        print(i)
+
+@flow(log_prints=True)
+def my_flow():
+    terminal_state = task_one.submit(return_state=True)
+    if terminal_state.type == StateType.COMPLETED:
+        print("Task one succeeded! Pausing flow run..")
+        pause_flow_run(timeout=2)
+    else:
+        print("Task one failed. Skipping pause flow run..")
+
+```
+
+
+Calling the `inspiring_joke` flow above will block code execution after the first task and wait for resumption to deliver the punchline.
+
+```
+await inspiring_joke()
+> "a raft of ducks walk into a bar..."
+
+```
+
+
+Paused flow runs can be resumed by clicking the **Resume** button in the Prefect UI or calling the `resume_flow_run` utility via client code.
+
+```
+resume_flow_run(FLOW_RUN_ID)
+
+```
+
+
+The paused flow run will then finish!
+
+```
+> "it's a wonder none of them ducked!"
+
+```
+
+
+### Suspending a flow run
+
+Similar to pausing a flow run, Prefect enables suspending an in-progress flow run.
+
+**The difference between pausing and suspending a flow run**
+
+There is an important difference between pausing and suspending a flow run. When you pause a flow run, the flow code is still running but is _blocked_ until someone resumes the flow. This is not the case with suspending a flow run! When you suspend a flow run, the flow exits completely and the infrastructure running it (e.g., a Kubernetes Job) tears down.
+
+This means that you can suspend flow runs to save costs instead of paying for long-running infrastructure. However, when the flow run resumes, the flow code will execute again from the beginning of the flow, so you should use [tasks](https://docs.prefect.io/concepts/tasks/) and [task caching](https://docs.prefect.io/concepts/tasks/#caching) to avoid recomputing expensive operations.
+
+Prefect exposes this functionality via the [`suspend_flow_run`](https://docs.prefect.io/api-ref/prefect/engine/#prefect.engine.suspend_flow_run) and [`resume_flow_run`](https://docs.prefect.io/api-ref/prefect/engine/#prefect.engine.resume_flow_run) functions, as well as the Prefect UI.
+
+When called inside of a flow, `suspend_flow_run` will immediately suspend execution of the flow run. The flow run will be marked as `Suspended` and will not be resumed until `resume_flow_run` is called.
+
+**Timeouts**
+
+Suspended flow runs time out after one hour by default. After the timeout, the flow run will fail with a message saying it suspended and never resumed. You can specify a different timeout period in seconds using the `timeout` parameter or pass `timeout=None` for no timeout.
+
+Here is an example of a flow that does not block flow execution while paused. This flow will exit after one task, and will be rescheduled upon resuming. The stored result of the first task is retrieved instead of being rerun.
+
+```python
+from prefect import flow, pause_flow_run, task
+
+@task(persist_result=True)
+def foo():
+    return 42
+
+@flow(persist_result=True)
+def noblock_pausing():
+    x = foo.submit()
+    pause_flow_run(timeout=30, reschedule=True)
+    y = foo.submit()
+    z = foo(wait_for=[x])
+    alpha = foo(wait_for=[y])
+    omega = foo(wait_for=[x, y])
+
+```
+
+
+Flow runs can be suspended out-of-process by calling `suspend_flow_run(flow_run_id=...)` or selecting the **Suspend** button in the Prefect UI or Prefect Cloud.
+
+Suspended flow runs can be resumed by clicking the **Resume** button in the Prefect UI or calling the `resume_flow_run` utility via client code.
+
+```
+resume_flow_run(FLOW_RUN_ID)
+
+```
+
+
+**Subflows can't be suspended independently of their parent run**
+
+You can't suspend a subflow run independently of its parent flow run.
+
+If you use a flow to schedule a flow run with `run_deployment`, the scheduled flow run will be linked to the calling flow as a subflow run by default. This means you won't be able to suspend the scheduled flow run independently of the calling flow. Call `run_deployment` with `as_subflow=False` to disable this linking if you need to be able to suspend the scheduled flow run independently of the calling flow.
+
+
+Waiting for input when pausing or suspending a flow run
+-------------------------------------------------------------------------------------------------------------------------------------
+
+**Experimental**
+
+
+The `wait_for_input` parameter used in the `pause_flow_run` or `suspend_flow_run` functions is an experimental feature. The interface or behavior of this feature may change without warning in future releases.
+
+If you encounter any issues, please let us know in Slack or with a GitHub issue.
+
+When pausing or suspending a flow run, you may want to wait for input from a user. Prefect provides a way to do this by leveraging the `pause_flow_run` and `suspend_flow_run` functions. These functions accept a `wait_for_input` argument, the value of which should be a subclass of `prefect.input.RunInput`, a pydantic model. When resuming the flow run, users are required to provide data for this model. Upon successful validation, the flow run resumes, and the return value of the `pause_flow_run` or `suspend_flow_run` is an instance of the model containing the provided data.
+
+Here is an example of a flow that pauses and waits for input from a user:
+
+```python
+from prefect import flow, pause_flow_run
+from prefect.input import RunInput
+
+
+class UserNameInput(RunInput):
+    name: str
+
+
+@flow(log_prints=True)
+async def greet_user():
+    user_input = await pause_flow_run(
+        wait_for_input=UserNameInput
+    )
+
+    print(f"Hello, {user_input.name}!")
+
+```
+
+
+Running this flow will create a flow run. The flow run will advance until code execution reaches `pause_flow_run`, at which point it will move into a `Paused` state. Execution will block and wait for resumption.
+
+When resuming the flow run, users will be prompted to provide a value for the `name` field of the `UserNameInput` model. Upon successful validation, the flow run will resume, and the return value of the `pause_flow_run` will be an instance of the `UserNameInput` model containing the provided data.
+
+For more in-depth information on receiving input from users when pausing and suspending flow runs, see the [Creating interactive workflows](https://docs.prefect.io/guides/creating-interactive-workflows/) guide.
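+
+For example, a run paused with `wait_for_input=UserNameInput` could be resumed from client code by passing values for the model's fields through the `run_input` argument. This is a sketch: the flow run ID below is a placeholder, and the `run_input` argument assumes a Prefect version that includes the experimental flow run input support described above:
+
+```python
+from prefect import resume_flow_run
+
+# Resume the paused flow run, supplying data for the UserNameInput model
+# defined above. The keys must match the model's fields; Prefect validates
+# them against the model before the run resumes.
+resume_flow_run(
+    "c0ffee00-aaaa-bbbb-cccc-000000000000",  # placeholder flow run ID
+    run_input={"name": "Marvin"},
+)
+
+```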
+
+Canceling a flow run
+---------------------------------------------------------------
+
+You may cancel a scheduled or in-progress flow run from the CLI, UI, REST API, or Python client.
+
+When cancellation is requested, the flow run is moved to a "Cancelling" state. If the deployment is a work pool-based deployment with a worker, then the worker monitors the state of flow runs and detects that cancellation has been requested. The worker then sends a signal to the flow run infrastructure, requesting termination of the run. If the run does not terminate after a grace period (default of 30 seconds), the infrastructure will be killed, ensuring the flow run exits.
+
+
+**A deployment is required**
+
+Flow run cancellation requires the flow run to be associated with a [deployment](#serving-a-flow). A monitoring process must be running to enforce the cancellation. Inline subflow runs, i.e. those created without `run_deployment`, cannot be cancelled without cancelling the parent flow run. If you need to be able to cancel a subflow run independently of its parent flow run, we recommend deploying it separately and starting it using the [run\_deployment](https://docs.prefect.io/api-ref/prefect/deployments/deployments/#prefect.deployments.deployments.run_deployment) function.
+
+Cancellation is robust to restarts of Prefect workers. To enable this, we attach metadata about the created infrastructure to the flow run. Internally, this is referred to as the `infrastructure_pid` or infrastructure identifier. Generally, this is composed of two parts:
+
+1. Scope: identifying where the infrastructure is running.
+2. ID: a unique identifier for the infrastructure within the scope.
+
+The scope is used to ensure that Prefect does not kill the wrong infrastructure. For example, workers running on multiple machines may have overlapping process IDs but should not have a matching scope.
+
+The identifiers for infrastructure types:
+
+* Processes: The machine hostname and the PID.
+* Docker Containers: The Docker API URL and container ID.
+* Kubernetes Jobs: The Kubernetes cluster name and the job name.
+
+While the cancellation process is robust, there are a few issues that can occur:
+
+* If the infrastructure block for the flow run has been removed or altered, cancellation may not work.
+* If the infrastructure block for the flow run does not have support for cancellation, cancellation will not work.
+* If the identifier scope does not match when attempting to cancel a flow run, the worker will be unable to cancel the flow run. Another worker may attempt cancellation.
+* If the infrastructure associated with the run cannot be found or has already been killed, the worker will mark the flow run as cancelled.
+* If the `infrastructure_pid` is missing from the flow run, it will be marked as cancelled but cancellation cannot be enforced.
+* If the worker runs into an unexpected error during cancellation, the flow run may or may not be cancelled depending on where the error occurred. The worker will try again to cancel the flow run. Another worker may attempt cancellation.
+
+
+**Enhanced cancellation**
+
+
+We are working on improving cases where cancellation can fail.
You can try the improved cancellation experience by enabling the `PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION` setting on your worker or agents:
+
+```bash
+prefect config set PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION=True
+
+```
+
+
+If you encounter any issues, please let us know in [Slack](https://www.prefect.io/slack/) or with a [GitHub](https://github.com/PrefectHQ/prefect) issue.
+
+### Cancel via the CLI
+
+From the command line in your execution environment, you can cancel a flow run by using the `prefect flow-run cancel` CLI command, passing the ID of the flow run.
+
+```
+prefect flow-run cancel 'a55a4804-9e3c-4042-8b59-b3b6b7618736'
+
+```
+
+
+### Cancel via the UI
+
+From the UI, you can cancel a flow run by navigating to the flow run's detail page and clicking the `Cancel` button in the upper right corner.
+
+![Prefect UI](/images/flows4.png)
+
+Timeouts
+---------------------------------------
+
+Flow timeouts are used to prevent unintentionally long-running flows. When the duration of execution for a flow exceeds the duration specified in the timeout, a timeout exception will be raised and the flow will be marked as failed. In the UI, the flow will be visibly designated as `TimedOut`.
+
+Timeout durations are specified using the `timeout_seconds` keyword argument.
+
+```python
+from prefect import flow
+import time
+
+@flow(timeout_seconds=1, log_prints=True)
+def show_timeouts():
+    print("I will execute")
+    time.sleep(5)
+    print("I will not execute")
+
+```
diff --git a/docs/2.19.x/concepts/results.mdx b/docs/2.19.x/concepts/results.mdx
new file mode 100644
index 000000000000..a2e45d848b32
--- /dev/null
+++ b/docs/2.19.x/concepts/results.mdx
@@ -0,0 +1,729 @@
+---
+title: Results
+description: Results represent the data returned by a flow or a task.
+---
+
+
+Retrieving results
+-----------------------------------------------------------
+
+When **calling** flows or tasks, the result is returned directly:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return 1
+
+@flow
+def my_flow():
+    task_result = my_task()
+    return task_result + 1
+
+result = my_flow()
+assert result == 2
+
+```
+
+
+When working with flow and task states, the result can be retrieved with the `State.result()` method:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return 1
+
+@flow
+def my_flow():
+    state = my_task(return_state=True)
+    return state.result() + 1
+
+state = my_flow(return_state=True)
+assert state.result() == 2
+
+```
+
+
+When submitting tasks to a runner, the result can be retrieved with the `Future.result()` method:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return 1
+
+@flow
+def my_flow():
+    future = my_task.submit()
+    return future.result() + 1
+
+result = my_flow()
+assert result == 2
+
+```
+
+
+### Handling failures
+
+Sometimes your flows or tasks will encounter an **exception**. Prefect captures all exceptions in order to report states to the orchestrator, but we do not hide them from you (unless you ask us to) as your program needs to know if an unexpected error has occurred.
+
+When **calling** flows or tasks, the exceptions are raised as in normal Python:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    raise ValueError()
+
+@flow
+def my_flow():
+    try:
+        my_task()
+    except ValueError:
+        print("Oh no! The task failed.")
+
+    return True
+
+my_flow()
+
+```
+
+
+If you would prefer to check for a failed task without using `try/except`, you may ask Prefect to return the state:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    raise ValueError()
+
+@flow
+def my_flow():
+    state = my_task(return_state=True)
+
+    if state.is_failed():
+        print("Oh no! The task failed. Falling back to '1'.")
+        result = 1
+    else:
+        result = state.result()
+
+    return result + 1
+
+result = my_flow()
+assert result == 2
+
+```
+
+
+If you retrieve the result from a failed state, the exception will be raised. For this reason, it's often best to check if the state is failed first.
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    raise ValueError()
+
+@flow
+def my_flow():
+    state = my_task(return_state=True)
+
+    try:
+        result = state.result()
+    except ValueError:
+        print("Oh no! The state raised the error!")
+
+    return True
+
+my_flow()
+
+```
+
+
+When retrieving the result from a state, you can ask Prefect not to raise exceptions:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    raise ValueError()
+
+@flow
+def my_flow():
+    state = my_task(return_state=True)
+
+    maybe_result = state.result(raise_on_failure=False)
+    if isinstance(maybe_result, ValueError):
+        print("Oh no! The task failed. Falling back to '1'.")
+        result = 1
+    else:
+        result = maybe_result
+
+    return result + 1
+
+result = my_flow()
+assert result == 2
+
+```
+
+
+When submitting tasks to a runner, `Future.result()` works the same as `State.result()`:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    raise ValueError()
+
+@flow
+def my_flow():
+    future = my_task.submit()
+
+    try:
+        future.result()
+    except ValueError:
+        print("Ah! Futures will raise the failure as well.")
+
+    # You can ask it not to raise the exception too
+    maybe_result = future.result(raise_on_failure=False)
+    print(f"Got {type(maybe_result)}")
+
+    return True
+
+my_flow()
+
+```
+
+
+### Working with async results
+
+When **calling** flows or tasks, the result is returned directly:
+
+```python
+import asyncio
+from prefect import flow, task
+
+@task
+async def my_task():
+    return 1
+
+@flow
+async def my_flow():
+    task_result = await my_task()
+    return task_result + 1
+
+result = asyncio.run(my_flow())
+assert result == 2
+
+```
+
+
+When working with flow and task states, the result can be retrieved with the `State.result()` method:
+
+```python
+import asyncio
+from prefect import flow, task
+
+@task
+async def my_task():
+    return 1
+
+@flow
+async def my_flow():
+    state = await my_task(return_state=True)
+    result = await state.result(fetch=True)
+    return result + 1
+
+async def main():
+    state = await my_flow(return_state=True)
+    assert await state.result(fetch=True) == 2
+
+asyncio.run(main())
+
+```
+
+
+**Resolving results**
+
+Prefect 2.6.0 added automatic retrieval of persisted results. Prior to this version, `State.result()` did not require an `await`. For backwards compatibility, when used from an asynchronous context, `State.result()` returns a raw result type.
+
+You may opt in to the new behavior by passing `fetch=True` as shown in the example above. If you would like this behavior to be used automatically, you may enable the `PREFECT_ASYNC_FETCH_STATE_RESULT` setting. If you do not opt in to this behavior, you will see a warning.
+
+You may also opt out by setting `fetch=False`.
This will silence the warning, but you will need to retrieve your result manually from the result type.
+
+
+When submitting tasks to a runner, the result can be retrieved with the `Future.result()` method:
+
+```python
+import asyncio
+from prefect import flow, task
+
+@task
+async def my_task():
+    return 1
+
+@flow
+async def my_flow():
+    future = await my_task.submit()
+    result = await future.result()
+    return result + 1
+
+result = asyncio.run(my_flow())
+assert result == 2
+
+```
+
+
+Persisting results
+-----------------------------------------------------------
+
+The Prefect API does not store your results [except in special cases](#storage-of-results-in-prefect). Instead, the result is _persisted_ to a storage location in your infrastructure and Prefect stores a _reference_ to the result.
+
+The following Prefect features require results to be persisted:
+
+* Task cache keys
+* Flow run retries
+
+If results are not persisted, these features may not be usable.
+
+### Configuring persistence of results
+
+Persistence of results requires a [**serializer**](#result-serializer) and a [**storage** location](#result-storage-location). Prefect sets defaults for these, and you should not need to adjust them until you want to customize behavior. You can configure results on the `flow` and `task` decorators with the following options:
+
+* `persist_result`: Whether the result should be persisted to storage.
+* `result_storage`: Where to store the result when persisted.
+* `result_serializer`: How to convert the result to a storable form.
+
+#### Toggling persistence
+
+Persistence of the result of a task or flow can be configured with the `persist_result` option. The `persist_result` option defaults to a null value, which will automatically enable persistence if it is needed for a Prefect feature used by the flow or task. Otherwise, persistence is disabled by default.
+
+For example, the following flow has retries enabled. Flow retries require that all task results are persisted, so the task's result will be persisted:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return "hello world!"
+
+@flow(retries=2)
+def my_flow():
+    # This task does not have persistence toggled off and its result is needed
+    # for the flow's retry feature, so Prefect will persist its result at runtime
+    my_task()
+
+```
+
+
+Flow retries do not require the flow's result to be persisted, so it will not be.
+
+In this next example, one task has caching enabled. Task caching requires that the given task's result is persisted:
+
+```python
+from prefect import flow, task
+from datetime import timedelta
+
+@task(cache_key_fn=lambda context, parameters: "always", cache_expiration=timedelta(seconds=20))
+def my_task():
+    # This task uses caching so its result will be persisted by default
+    return "hello world!"
+
+
+@task
+def my_other_task():
+    ...
+
+@flow
+def my_flow():
+    # This task uses a feature that requires result persistence
+    my_task()
+
+    # This task does not use a feature that requires result persistence and the
+    # flow does not use any features that require task result persistence so its
+    # result will not be persisted by default
+    my_other_task()
+
+```
+
+
+Persistence of results can be manually toggled on or off:
+
+```python
+from prefect import flow, task
+
+@flow(persist_result=True)
+def my_flow():
+    # This flow will persist its result even if not necessary for a feature.
+    ...
+
+@task(persist_result=False)
+def my_task():
+    # This task will never persist its result.
+
+    # If persistence is needed for a feature, an error will be raised.
+    ...
+
+```
+
+
+Toggling persistence manually will always override any behavior that Prefect would infer.
+
+You may also change Prefect's default persistence behavior with the `PREFECT_RESULTS_PERSIST_BY_DEFAULT` setting. To persist results by default, even if they are not needed for a feature, change the value to a truthy value:
+
+```bash
+prefect config set PREFECT_RESULTS_PERSIST_BY_DEFAULT=true
+
+```
+
+
+Tasks and flows with `persist_result=False` will not persist their results even if `PREFECT_RESULTS_PERSIST_BY_DEFAULT` is `true`.
+
+#### Result storage location
+
+[The result storage location](#result-storage-types) can be configured with the `result_storage` option. The `result_storage` option defaults to a null value, which infers storage from the context. Generally, this means that tasks will use the result storage configured on the flow unless otherwise specified. If there is no context to load the storage from and results must be persisted, results will be stored in the path specified by the `PREFECT_LOCAL_STORAGE_PATH` setting (defaults to `~/.prefect/storage`).
+
+```python
+from prefect import flow, task
+from prefect.filesystems import LocalFileSystem, S3
+
+@flow(persist_result=True)
+def my_flow():
+    my_task() # This task will use the flow's result storage
+
+@task(persist_result=True)
+def my_task():
+    ...
+
+my_flow() # The flow has no result storage configured and no parent, so the local file system will be used.
+
+
+# Reconfigure the flow to use a different storage type
+new_flow = my_flow.with_options(result_storage=S3(bucket_path="my-bucket"))
+
+new_flow() # The flow and task within it will use S3 for result storage.
+
+```
+
+
+You can configure this to use a specific storage using one of the following:
+
+* A storage instance, e.g. `LocalFileSystem(basepath=".my-results")`
+* A storage slug, e.g. `'s3/dev-s3-block'`
+
+#### Result storage key
+
+The path of the result file in the result storage can be configured with the `result_storage_key`. The `result_storage_key` option defaults to a null value, which generates a unique identifier for each result.
+
+```python
+from prefect import flow, task
+from prefect.filesystems import LocalFileSystem, S3
+
+@flow(result_storage=S3(bucket_path="my-bucket"))
+def my_flow():
+    my_task()
+
+@task(persist_result=True, result_storage_key="my_task.json")
+def my_task():
+    ...
+
+my_flow() # The task's result will be persisted to 's3://my-bucket/my_task.json'
+
+```
+
+
+Result storage keys are formatted with access to all of the modules in `prefect.runtime` and the run's `parameters`. In the following example, we will run a flow with three runs of the same task. Each task run will write its result to a unique file based on the `name` parameter.
+ +```python +from prefect import flow, task + +@flow() +def my_flow(): + hello_world() + hello_world(name="foo") + hello_world(name="bar") + +@task(persist_result=True, result_storage_key="hello-{parameters[name]}.json") +def hello_world(name: str = "world"): + return f"hello {name}" + +my_flow() + +``` + + +After running the flow, we can see three persisted result files in our storage directory: + +``` +$ ls ~/.prefect/storage | grep "hello-" +hello-bar.json +hello-foo.json +hello-world.json + +``` + + +In the next example, we include metadata about the flow run from the `prefect.runtime.flow_run` module: + +```python +from prefect import flow, task + +@flow +def my_flow(): + hello_world() + +@task(persist_result=True, result_storage_key="{flow_run.flow_name}_{flow_run.name}_hello.json") +def hello_world(name: str = "world"): + return f"hello {name}" + +my_flow() + +``` + + +After running this flow, we can see a result file templated with the name of the flow and the flow run: + +``` +❯ ls ~/.prefect/storage | grep "my-flow" +my-flow_industrious-trout_hello.json + +``` + + +If a result exists at a given storage key in the storage location, it will be overwritten. + +Result storage keys can only be configured on tasks at this time. + +#### Result serializer + +[The result serializer](#result-serializer-types) can be configured with the `result_serializer` option. The `result_serializer` option defaults to a null value, which infers the serializer from the context. Generally, this means that tasks will use the result serializer configured on the flow unless otherwise specified. If there is no context to load the serializer from, the serializer defined by `PREFECT_RESULTS_DEFAULT_SERIALIZER` will be used. This setting defaults to Prefect's pickle serializer. + +You may configure the result serializer using: + +* A type name, e.g. `"json"` or `"pickle"` — this corresponds to an instance with default values +* An instance, e.g. `JSONSerializer(jsonlib="orjson")` + +#### Compressing results + +Prefect provides a `CompressedSerializer` which can be used to _wrap_ other serializers to provide compression over the bytes they generate. The compressed serializer uses `lzma` compression by default. We test other compression schemes provided in the Python standard library such as `bz2` and `zlib`, but you should be able to use any compression library that provides `compress` and `decompress` methods. + +You may configure compression of results using: + +* A type name, prefixed with `compressed/` e.g. `"compressed/json"` or `"compressed/pickle"` +* An instance e.g. `CompressedSerializer(serializer="pickle", compressionlib="lzma")` + +Note that the `"compressed/"` shortcut will only work for serializers provided by Prefect. If you are using custom serializers, you must pass a full instance. + +### Storage of results in Prefect + +The Prefect API does not store your results in most cases for the following reasons: + +* Results can be large and slow to send to and from the API. +* Results often contain private information or data. +* Results would need to be stored in the database or complex logic implemented to hydrate from another source. + +There are a few cases where Prefect _will_ store your results directly in the database. This is an optimization to reduce the overhead of reading and writing to result storage. 
+
+The following data types will be stored by the API without persistence to storage:
+
+* booleans (`True`, `False`)
+* nulls (`None`)
+
+If `persist_result` is set to `False`, these values will never be stored.
+
+Tracking results
+-------------------------------------------------------
+
+The Prefect API tracks metadata about your results. The value of your result is only stored in [specific cases](#storage-of-results-in-prefect). Result metadata can be seen in the UI on the "Results" page for flows.
+
+Prefect tracks the following result metadata:
+
+* Data type
+* Storage location (if persisted)
+
+Caching of results in memory
+-------------------------------------------------------------------------------
+
+When running your workflows, Prefect will keep the results of all tasks and flows in memory so they can be passed downstream. In some cases, it is desirable to override this behavior. For example, if you are returning a large amount of data from a task, it can be costly to keep it in memory for the entire duration of the flow run.
+
+Flows and tasks both include an option to drop the result from memory with `cache_result_in_memory`:
+
+```python
+@flow(cache_result_in_memory=False)
+def foo():
+    return "pretend this is large data"
+
+@task(cache_result_in_memory=False)
+def bar():
+    return "pretend this is biiiig data"
+
+```
+
+
+When `cache_result_in_memory` is disabled, the result of your flow or task will be persisted by default. The result will then be pulled from storage when needed.
+
+```python
+@flow
+def foo():
+    result = bar()
+    state = bar(return_state=True)
+
+    # The result will be retrieved from storage here
+    state.result()
+
+    future = bar.submit()
+    # The result will be retrieved from storage here
+    future.result()
+
+@task(cache_result_in_memory=False)
+def bar():
+    # This result will be persisted
+    return "pretend this is biiiig data"
+
+```
+
+
+If both `cache_result_in_memory` and persistence are disabled, your results will not be available downstream.
+
+```python
+@task(persist_result=False, cache_result_in_memory=False)
+def bar():
+    return "pretend this is biiiig data"
+
+@flow
+def foo():
+    # Raises an error
+    result = bar()
+
+    # This is okay
+    state = bar(return_state=True)
+
+    # Raises an error
+    state.result()
+
+    # This is okay
+    future = bar.submit()
+
+    # Raises an error
+    future.result()
+
+```
+
+
+Result storage types
+---------------------------------------------------------------
+
+Result storage is responsible for reading and writing serialized data to an external location. At this time, any file system block can be used for result storage.
+
+Result serializer types
+---------------------------------------------------------------------
+
+A result serializer is responsible for converting your Python object to and from bytes. This is necessary to store the object outside of Python and retrieve it later.
+
+### Pickle serializer
+
+Pickle is a standard Python protocol for encoding arbitrary Python objects. We supply a custom pickle serializer at `prefect.serializers.PickleSerializer`. Prefect's pickle serializer uses the [cloudpickle](https://github.com/cloudpipe/cloudpickle) project by default to support more object types. Alternative pickle libraries can be specified:
+
+```python
+from prefect.serializers import PickleSerializer
+
+PickleSerializer(picklelib="custompickle")
+
+```
+
+
+Benefits of the pickle serializer:
+
+* Many object types are supported.
+* Objects can define custom pickle support.
+
+Drawbacks of the pickle serializer:
+
+* When nested attributes of an object cannot be pickled, it is hard to determine the cause.
+* When deserializing objects, your Python and pickle library versions must match those used at serialization time.
+* Serialized objects cannot be easily shared across different programming languages.
+* Serialized objects are not human readable.
+
+### JSON serializer
+
+We supply a custom JSON serializer at `prefect.serializers.JSONSerializer`. Prefect's JSON serializer uses custom hooks by default to support more object types. Specifically, we add support for all types supported by [Pydantic](https://pydantic-docs.helpmanual.io/).
+
+By default, we use the standard Python `json` library. Alternative JSON libraries can be specified:
+
+```python
+from prefect.serializers import JSONSerializer
+
+JSONSerializer(jsonlib="orjson")
+
+```
+
+
+Benefits of the JSON serializer:
+
+* Serialized objects are human readable.
+* Serialized objects can often be shared across different programming languages.
+* Deserialization of serialized objects is generally version agnostic.
+
+Drawbacks of the JSON serializer:
+
+* Supported types are limited.
+* Implementing support for additional types must be done at the serializer level.
+
+Result types
+-----------------------------------------------
+
+Prefect uses internal result types to capture information about the result attached to a state. The following types are used:
+
+* `UnpersistedResult`: Stores result metadata but the value is only available when created.
+* `LiteralResult`: Stores simple values inline.
+* `PersistedResult`: Stores a reference to a result persisted to storage.
+
+All result types include a `get()` method that can be called to return the value of the result. This is done behind the scenes when the `result()` method is used on states or futures.
+
+### Unpersisted results
+
+Unpersisted results are used to represent results that have not been and will not be persisted beyond the current flow run. The value associated with the result is stored in memory, but will not be available later. Result metadata is attached to this object for storage in the API and representation in the UI.
+
+### Literal results
+
+Literal results are used to represent [results stored in the Prefect database](#storage-of-results-in-prefect). The values contained by these results must always be JSON serializable.
+
+Example:
+
+```python
+from prefect.results import LiteralResult
+
+result = LiteralResult(value=None)
+result.json()
+# {"type": "result", "value": "null"}
+
+```
+
+
+Literal results reduce the overhead required to persist simple results.
+
+### Persisted results
+
+The persisted result type contains all of the information needed to retrieve the result from storage. This includes:
+
+* Storage: A reference to the [result storage](#result-storage-types) that can be used to read the serialized result.
+* Key: Indicates where this specific result is in storage.
+
+Persisted result types also contain metadata for inspection without retrieving the result:
+
+* Serializer type: The name of the [result serializer](#result-serializer-types) type.
+
+The `get()` method on result references retrieves the data from storage, deserializes it, and returns the original object. The `get()` operation will cache the resolved object to reduce the overhead of subsequent calls.
+
+#### Persisted result blob
+
+When results are persisted to storage, they are always written as a JSON document. The schema for this is described by the `PersistedResultBlob` type.
The document contains:
+
+* The serialized data of the result.
+* A full description of the [result serializer](#result-serializer-types) that can be used to deserialize the result data.
+* The Prefect version used to create the result.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/schedules.mdx b/docs/2.19.x/concepts/schedules.mdx
new file mode 100644
index 000000000000..dbe7bf49b69f
--- /dev/null
+++ b/docs/2.19.x/concepts/schedules.mdx
@@ -0,0 +1,316 @@
+---
+title: Schedules
+---
+
+Scheduling is one of the primary reasons for using an orchestrator such as Prefect. Prefect allows you to use schedules to automatically create new flow runs for deployments.
+
+Prefect Cloud can also schedule flow runs through event-driven [automations](https://docs.prefect.io/concepts/automations/).
+
+Schedules tell the Prefect API how to create new flow runs for you automatically on a specified cadence.
+
+You can add a schedule to any [deployment](https://docs.prefect.io/concepts/deployments/). The Prefect `Scheduler` service periodically reviews every deployment and creates new flow runs according to the schedule configured for the deployment.
+
+
+**Support for multiple schedules**
+
+We are currently rolling out support for multiple schedules per deployment. You can now assign multiple schedules to deployments in the Prefect UI, the CLI via `prefect deployment schedule` commands, the `Deployment` class, and in [block-based deployment](https://docs.prefect.io/concepts/deployments/#block-based-deployments) YAML files.
+
+Support for multiple schedules in `flow.serve`, `flow.deploy`, `serve`, and [worker-based deployments](https://docs.prefect.io/concepts/work-pools/) with `prefect deploy` will arrive soon.
+
+
+Schedule types
+---------------------------------------------------
+
+Prefect supports several types of schedules that cover a wide range of use cases and offer a large degree of customization:
+
+* [`Cron`](#cron) is most appropriate for users who are already familiar with `cron` from previous use.
+* [`Interval`](#interval) is best suited for deployments that need to run at some consistent cadence that isn't related to absolute time.
+* [`RRule`](#rrule) is best suited for deployments that rely on calendar logic for simple recurring schedules, irregular intervals, exclusions, or day-of-month adjustments.
+
+
+**Schedules can be inactive**
+
+When you create or edit a schedule, you can set the `active` property to `False` in Python (or `false` in a YAML file) to deactivate the schedule. This is useful if you want to keep the schedule configuration but temporarily stop the schedule from creating new flow runs.
+
+### Cron
+
+A schedule may be specified with a [`cron`](https://en.wikipedia.org/wiki/Cron) pattern. Users may also provide a timezone to enforce DST behaviors.
+
+`Cron` uses [`croniter`](https://github.com/kiorky/croniter) to specify datetime iteration with a `cron`-like format.
+
+`Cron` properties include:
+
+
+|Property  |Description                                                                                                            |
+|----------|-----------------------------------------------------------------------------------------------------------------------|
+|`cron`    |A valid `cron` string. (Required)                                                                                      |
+|`day_or`  |Boolean indicating how `croniter` handles `day` and `day_of_week` entries. Default is `True`.                          |
+|`timezone`|String name of a time zone. (See the [IANA Time Zone Database](https://www.iana.org/time-zones) for valid time zones.)|
+
+
+#### How the `day_or` property works
+
+The `day_or` property defaults to `True`, matching the behavior of `cron`.
In this mode, if you specify a `day` (of the month) entry and a `day_of_week` entry, the schedule will run a flow on both the specified day of the month _and_ on the specified day of the week. The "or" in `day_or` refers to the fact that the two entries are treated like an `OR` statement, so the schedule should include both, as in the SQL statement `SELECT * FROM employees WHERE first_name = 'Xiāng' OR last_name = 'Brookins';`.
+
+For example, with `day_or` set to `True`, the cron schedule `* * 3 1 2` runs a flow every minute on the 3rd day of the month (whatever that is) and on Tuesday (the second day of the week) in January (the first month of the year).
+
+With `day_or` set to `False`, the `day` (of the month) and `day_of_week` entries are joined with the more restrictive `AND` operation, as in the SQL statement `SELECT * from employees WHERE first_name = 'Andrew' AND last_name = 'Brookins';`. For example, the same schedule, when `day_or` is `False`, runs a flow every minute on the **3rd Tuesday** in January. This behavior matches `fcron` instead of `cron`.
+
+
+**Supported `croniter` features**
+
+While Prefect supports most features of `croniter` for creating `cron`-like schedules, we do not currently support "R" random or "H" hashed keyword expressions or the schedule jittering possible with those expressions.
+
+
+
+**Daylight saving time considerations**
+
+If the `timezone` is a DST-observing one, then the schedule will adjust itself appropriately.
+
+The `cron` rules for DST are based on schedule times, not intervals. This means that an hourly `cron` schedule fires on every new schedule hour, not every elapsed hour. For example, when clocks are set back, this results in a two-hour pause as the schedule will fire _the first time_ 1am is reached and _the first time_ 2am is reached, 120 minutes later.
+
+Longer schedules, such as one that fires at 9am every morning, will adjust for DST automatically.
+
+
+### Interval
+
+An `Interval` schedule creates new flow runs on a regular interval measured in seconds. Intervals are computed using an optional `anchor_date`. For example, here's how you can create a schedule for every 10 minutes in a [block-based deployment](https://docs.prefect.io/concepts/deployments/#block-based-deployments) YAML file:
+
+```yaml
+schedule:
+  interval: 600
+  timezone: America/Chicago
+
+```
+
+
+`Interval` properties include:
+
+| Property      | Description                                                                                                                                                                         |
+|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `interval`    | `datetime.timedelta` indicating the time between flow runs. (Required)                                                                                                             |
+| `anchor_date` | `datetime.datetime` indicating the starting or "anchor" date to begin the schedule. If no `anchor_date` is supplied, the current UTC time is used.                                 |
+| `timezone`    | String name of a time zone, used to enforce localization behaviors like DST boundaries. (See the [IANA Time Zone Database](https://www.iana.org/time-zones) for valid time zones.) |
+
+Note that the `anchor_date` does not indicate a "start time" for the schedule, but rather a fixed point in time from which to compute intervals. If the anchor date is in the future, then schedule dates are computed by subtracting the `interval` from it. Note that in the example sketched below, we import the [Pendulum](https://pendulum.eustace.io/) Python package for easy datetime manipulation. Pendulum isn’t required, but it’s a useful tool for specifying dates.
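+
+A minimal sketch of such an interval schedule in Python is shown below. The 10-minute cadence and the anchor values are illustrative, and `IntervalSchedule` is imported from `prefect.client.schemas.schedules`, as in the other Python examples on this page:
+
+```python
+from datetime import timedelta
+
+import pendulum
+
+from prefect.client.schemas.schedules import IntervalSchedule
+
+# Fire every 10 minutes, with occurrences computed from the anchor date.
+# Pendulum makes it easy to attach a timezone to the anchor datetime.
+schedule = IntervalSchedule(
+    interval=timedelta(minutes=10),
+    anchor_date=pendulum.datetime(2023, 1, 1, tz="America/Chicago"),
+    timezone="America/Chicago",
+)
+
+```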
+
+
+**Daylight saving time considerations**
+
+If the schedule's `anchor_date` or `timezone` are provided with a DST-observing timezone, then the schedule will adjust itself appropriately. Intervals greater than 24 hours will follow DST conventions, while intervals of less than 24 hours will follow UTC intervals.
+
+For example, an hourly schedule will fire every UTC hour, even across DST boundaries. When clocks are set back, this will result in two runs that _appear_ to both be scheduled for 1am local time, even though they are an hour apart in UTC time.
+
+For longer intervals, like a daily schedule, the interval schedule will adjust for DST boundaries so that the clock-hour remains constant. This means that a daily schedule that always fires at 9am will observe DST and continue to fire at 9am in the local time zone.
+
+
+### RRule
+
+An `RRule` schedule supports [iCal recurrence rules](https://icalendar.org/iCalendar-RFC-5545/3-8-5-3-recurrence-rule.html) (RRules), which provide convenient syntax for creating repetitive schedules. Schedules can repeat on a frequency from yearly down to every minute.
+
+`RRule` uses the [dateutil rrule](https://dateutil.readthedocs.io/en/stable/rrule.html) module to specify iCal recurrence rules.
+
+RRules are appropriate for any kind of calendar-date manipulation, including simple repetition, irregular intervals, exclusions, weekday or day-of-month adjustments, and more. RRules can represent complex logic like:
+
+* The last weekday of each month
+* The fourth Thursday of November
+* Every other day of the week
+
+`RRule` properties include:
+
+
+|Property  |Description                                                                                                                                                   |
+|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|`rrule`   |String representation of an RRule schedule. See the [`rrulestr` examples](https://dateutil.readthedocs.io/en/stable/rrule.html#rrulestr-examples) for syntax.|
+|`timezone`|String name of a time zone. See the [IANA Time Zone Database](https://www.iana.org/time-zones) for valid time zones.                                         |
+
+
+You may find it useful to use an RRule string generator such as the [iCalendar.org RRule Tool](https://icalendar.org/rrule-tool.html) to help create valid RRules.
+
+For example, the following RRule schedule in a [block-based deployment](https://docs.prefect.io/concepts/deployments/#block-based-deployments) YAML file creates flow runs on Monday, Wednesday, and Friday until July 30, 2024.
+
+```yaml
+schedule:
+  rrule: 'FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z'
+
+```
+
+
+**RRule restrictions**
+
+Note that the maximum supported character length of an `rrulestr` is 6,500 characters.
+
+Note that `COUNT` is not supported. Please use `UNTIL` or the `/deployments/{id}/runs` endpoint to schedule a fixed number of flow runs.
+
+
+
+**Daylight saving time considerations**
+
+Note that as a calendar-oriented standard, `RRules` are sensitive to the initial timezone provided. A 9am daily schedule with a DST-aware start date will maintain a local 9am time through DST boundaries. A 9am daily schedule with a UTC start date will maintain a 9am UTC time.
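+
+If you prefer to build this schedule in Python rather than YAML, the sketch below shows one way to do it. It assumes the `RRuleSchedule` class from `prefect.client.schemas.schedules`, which mirrors the YAML fields shown above:
+
+```python
+from prefect.client.schemas.schedules import RRuleSchedule
+
+# Mondays, Wednesdays, and Fridays until July 30, 2024,
+# evaluated in the America/New_York timezone.
+schedule = RRuleSchedule(
+    rrule="FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z",
+    timezone="America/New_York",
+)
+
+```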
+
+
+Creating schedules
+-----------------------------------------------------------
+
+There are several ways to create a schedule for a deployment:
+
+* Through the Prefect UI
+* Via the `cron`, `interval`, or `rrule` parameters if building your deployment via the [`serve` method](https://docs.prefect.io/concepts/flows/#serving-a-flow) of the `Flow` object or [the `serve` utility](https://docs.prefect.io/concepts/flows/#serving-multiple-flows-at-once) for managing multiple flows simultaneously
+* If using [worker-based deployments](https://docs.prefect.io/concepts/work-pools/):
+    * When you define a deployment with `flow.serve` or `flow.deploy`
+    * Through the interactive `prefect deploy` command
+    * With the `deployments` -> `schedules` section of the `prefect.yaml` file
+* If using [block-based deployments](https://docs.prefect.io/concepts/deployments/#block-based-deployments) (deprecated):
+    * Through the `schedules` section of the deployment YAML file
+    * By passing `schedules` into the `Deployment` class or `Deployment.build_from_flow`
+
+### Creating schedules in the UI
+
+You can add schedules in the **Schedules** section on a **Deployment** page in the UI.
+
+#### Locating the **Schedules** section
+
+The **Schedules** section appears in the sidebar on the right side of the page on wider displays. On narrower displays, it appears on the **Details** tab of the page.
+
+#### Adding a schedule
+
+Under **Schedules**, select the **+ Schedule** button. A modal dialog will open. Choose **Interval** or **Cron** to create a schedule.
+
+![Prefect UI with Interval button selected](/images/schedules1.png)
+
+**What about RRule?**
+
+The UI does not support creating RRule schedules. However, the UI will display RRule schedules that you've created via the command line.
+
+The new schedule will appear on the **Deployment** page where you created it. In addition, the schedule will be viewable in human-friendly text in the list of deployments on the **Deployments** page.
+
+After you create a schedule, new scheduled flow runs will be visible in the **Upcoming** tab of the **Deployment** page where you created it.
+
+#### Editing schedules
+
+You can edit a schedule by selecting **Edit** from the three-dot menu next to a schedule on a **Deployment** page.
+
+### Creating schedules with a Python deployment creation file
+
+When you create a deployment in a Python file with `flow.serve()`, `serve`, `flow.deploy()`, or `deploy`, you can specify the schedule. Just add the keyword argument `cron`, `interval`, or `rrule`.
+
+```
+interval: An interval on which to execute the deployment. Accepts a number or a
+    timedelta object to create a single schedule. If a number is given, it will be
+    interpreted as seconds. Also accepts an iterable of numbers or timedelta to create
+    multiple schedules.
+cron: A cron schedule string of when to execute runs of this deployment.
+    Also accepts an iterable of cron schedule strings to create multiple schedules.
+rrule: An rrule schedule string of when to execute runs of this deployment.
+    Also accepts an iterable of rrule schedule strings to create multiple schedules.
+schedules: A list of schedule objects defining when to execute runs of this deployment.
+    Used to define multiple schedules or additional scheduling options such as `timezone`.
+schedule: A schedule object defining when to execute runs of this deployment. Used to
+    define additional scheduling options like `timezone`.
+
+```
+
+
+Here's an example of creating a cron schedule with `serve` for a deployment flow that will run every minute of every day:
+
+```python
+my_flow.serve(name="flowing", cron="* * * * *")
+
+```
+
+
+If using work pool-based deployments, the `deploy` method has the same schedule-based parameters.
+
+Here's an example of creating an interval schedule with `serve` for a deployment flow that will run every 10 minutes with an anchor date and a timezone:
+
+```python
+from datetime import timedelta, datetime
+from prefect.client.schemas.schedules import IntervalSchedule
+
+my_flow.serve(name="flowing", schedule=IntervalSchedule(interval=timedelta(minutes=10), anchor_date=datetime(2023, 1, 1, 0, 0), timezone="America/Chicago"))
+
+```
+
+
+Block and agent-based deployments with Python files are not a recommended way to create deployments. However, if you are using that deployment creation method you can create a schedule by passing a `schedule` argument to the `Deployment.build_from_flow` method.
+
+For example, here's how you create a cron schedule in a Python deployment file:
+
+```python
+from prefect.deployments import Deployment
+from prefect.client.schemas.schedules import CronSchedule
+
+cron_demo = Deployment.build_from_flow(
+    pipeline,
+    "etl",
+    schedule=(CronSchedule(cron="0 0 * * *", timezone="America/Chicago"))
+)
+
+```
+
+
+### Creating schedules with the interactive `prefect deploy` command
+
+If you are using [worker-based deployments](https://docs.prefect.io/concepts/work-pools/), you can create a schedule through the interactive `prefect deploy` command. You will be prompted to choose which type of schedule to create.
+
+### Creating schedules in the `prefect.yaml` file's `deployments` -> `schedules` section
+
+If you save the `prefect.yaml` file from the `prefect deploy` command, you will see it has a `schedules` section for your deployment. Alternatively, you can create a `prefect.yaml` file from a recipe or from scratch and add a `schedules` section to it.
+
+```yaml
+deployments:
+  ...
+  schedules:
+    - cron: "0 0 * * *"
+      timezone: "America/Chicago"
+      active: false
+    - cron: "0 12 * * *"
+      timezone: "America/New_York"
+      active: true
+    - cron: "0 18 * * *"
+      timezone: "Europe/London"
+      active: true
+
+```
+
+
+The `Scheduler` service
+-------------------------------------------------------------------
+
+The `Scheduler` service is started automatically when `prefect server start` is run and it is a built-in service of Prefect Cloud.
+
+By default, the `Scheduler` service visits deployments on a 60-second loop, though recently-modified deployments will be visited more frequently. The `Scheduler` evaluates each deployment's schedules and creates new runs appropriately. For typical deployments, it will create the next three runs, though more runs will be scheduled if the next three would all start in the next hour.
+
+More specifically, the `Scheduler` tries to create the smallest number of runs that satisfy the following constraints, in order:
+
+* No more than 100 runs will be scheduled.
+* Runs will not be scheduled more than 100 days in the future.
+* At least 3 runs will be scheduled.
+* Runs will be scheduled until at least one hour in the future.
+
+These behaviors can all be adjusted through the relevant settings that can be viewed with the terminal command `prefect config view --show-defaults`:
+
+```
+PREFECT_API_SERVICES_SCHEDULER_DEPLOYMENT_BATCH_SIZE='100'
+PREFECT_API_SERVICES_SCHEDULER_ENABLED='True'
+PREFECT_API_SERVICES_SCHEDULER_INSERT_BATCH_SIZE='500'
+PREFECT_API_SERVICES_SCHEDULER_LOOP_SECONDS='60.0'
+PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS='3'
+PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS='100'
+PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME='1:00:00'
+PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME='100 days, 0:00:00'
+
+```
+
+
+See the [Settings docs](https://docs.prefect.io/concepts/settings/) for more information on altering your settings.
+
+These settings mean that if a deployment has an hourly schedule, the default settings will create runs for the next 4 days (or 100 hours). If it has a weekly schedule, the default settings will maintain the next 14 runs (up to 100 days in the future).
+
+**The `Scheduler` does not affect execution**
+
+The Prefect `Scheduler` service only creates new flow runs and places them in `Scheduled` states. It is not involved in flow or task execution.
+
+
+If you change a schedule, previously scheduled flow runs that have not started are removed, and new scheduled flow runs are created to reflect the new schedule.
+
+To remove all scheduled runs for a flow deployment, you can remove the schedule via the UI.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/states.mdx b/docs/2.19.x/concepts/states.mdx
new file mode 100644
index 000000000000..914020bf2728
--- /dev/null
+++ b/docs/2.19.x/concepts/states.mdx
@@ -0,0 +1,337 @@
+---
+title: States
+---
+
+Overview
+---------------------------------------
+
+States are rich objects that contain information about the status of a particular [task](https://docs.prefect.io/concepts/tasks) run or [flow](https://docs.prefect.io/concepts/flows/) run. While you don't need to know the details of the states to use Prefect, you can give your workflows superpowers by taking advantage of them.
+
+At any moment, you can learn anything you need to know about a task or flow by examining its current state or the history of its states. For example, a state could tell you that a task:
+
+* is scheduled to make a third run attempt in an hour
+
+* succeeded and what data it produced
+
+* was scheduled to run, but later cancelled
+
+* used the cached result of a previous run instead of re-running
+
+* failed because it timed out
+
+
+By manipulating a relatively small number of task states, Prefect flows can harness the complexity that emerges in workflows.
+
+**Only runs have states**
+
+Though we often refer to the "state" of a flow or a task, what we really mean is the state of a flow _run_ or a task _run_. Flows and tasks are templates that describe what a system does; only when we run the system does it also take on a state. So while we might refer to a task as "running" or being "successful", we really mean that a specific instance of the task is in that state.
+
+
+State Types
+---------------------------------------------
+
+States have names and types. State types are canonical, with specific orchestration rules that apply to transitions into and out of each state type. A state's name is often, but not always, synonymous with its type. For example, a task run that is running for the first time has a state with the name Running and the type `RUNNING`.
However, if the task retries, that same task run will have the name Retrying and the type `RUNNING`. Each time the task run transitions into the `RUNNING` state, the same orchestration rules are applied.
+
+There are terminal state types from which there are no orchestrated transitions to any other state type.
+
+* `COMPLETED`
+* `CANCELLED`
+* `FAILED`
+* `CRASHED`
+
+The full complement of states and state types includes:
+
+| Name          | Type       | Terminal? | Description                                                                                                |
+|---------------|------------|-----------|------------------------------------------------------------------------------------------------------------|
+| Scheduled     | SCHEDULED  | No        | The run will begin at a particular time in the future.                                                      |
+| Late          | SCHEDULED  | No        | The run's scheduled start time has passed, but it has not transitioned to PENDING (15 seconds by default).  |
+| AwaitingRetry | SCHEDULED  | No        | The run did not complete successfully because of a code issue and had remaining retry attempts.             |
+| Pending       | PENDING    | No        | The run has been submitted to run, but is waiting on necessary preconditions to be satisfied.               |
+| Running       | RUNNING    | No        | The run code is currently executing.                                                                        |
+| Retrying      | RUNNING    | No        | The run code is currently executing after previously not completing successfully.                           |
+| Paused        | PAUSED     | No        | The run code has stopped executing until it receives manual approval to proceed.                            |
+| Cancelling    | CANCELLING | No        | The infrastructure on which the code was running is being cleaned up.                                       |
+| Cancelled     | CANCELLED  | Yes       | The run did not complete because a user determined that it should not.                                      |
+| Completed     | COMPLETED  | Yes       | The run completed successfully.                                                                             |
+| Failed        | FAILED     | Yes       | The run did not complete because of a code issue and had no remaining retry attempts.                       |
+| Crashed       | CRASHED    | Yes       | The run did not complete because of an infrastructure issue.                                                 |
+
+
+Returned values
+-----------------------------------------------------
+
+When calling a task or a flow, there are three types of returned values:
+
+* Data: A Python object (such as `int`, `str`, `dict`, `list`, and so on).
+* `State`: A Prefect object indicating the state of a flow or task run.
+* [`PrefectFuture`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture): A Prefect object that contains both _data_ and _State_.
+
+Returning data is the default behavior any time you call `your_task()`.
+
+Returning Prefect [`State`](https://docs.prefect.io/api-ref/server/schemas/states/) occurs anytime you call your task or flow with the argument `return_state=True`.
+
+Returning [`PrefectFuture`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture) is achieved by calling `your_task.submit()`.
+
+### Return Data
+
+By default, running a task will return data:
+
+```python
+from prefect import flow, task
+
+@task
+def add_one(x):
+    return x + 1
+
+@flow
+def my_flow():
+    result = add_one(1) # return int
+
+```
+
+
+The same rule applies for a subflow:
+
+```python
+@flow
+def subflow():
+    return 42
+
+@flow
+def my_flow():
+    result = subflow() # return data
+
+```
+
+
+### Return Prefect State
+
+To return a `State` instead, add `return_state=True` as a parameter of your task call.
+
+```python
+@flow
+def my_flow():
+    state = add_one(1, return_state=True) # return State
+
+```
+
+
+To get data from a `State`, call `.result()`.
+ +```python +@flow +def my_flow(): + state = add_one(1, return_state=True) # return State + result = state.result() # return int + +``` + + +The same rule applies for a subflow: + +```python +@flow +def subflow(): + return 42 + +@flow +def my_flow(): + state = subflow(return_state=True) # return State + result = state.result() # return int + +``` + + +### Return a PrefectFuture + +To get a `PrefectFuture`, add `.submit()` to your task call. + +```python +@flow +def my_flow(): + future = add_one.submit(1) # return PrefectFuture + +``` + + +To get data from a `PrefectFuture`, call `.result()`. + +```python +@flow +def my_flow(): + future = add_one.submit(1) # return PrefectFuture + result = future.result() # return data + +``` + + +To get a `State` from a `PrefectFuture`, call `.wait()`. + +```python +@flow +def my_flow(): + future = add_one.submit(1) # return PrefectFuture + state = future.wait() # return State + +``` + + +Final state determination +------------------------------------------------------------------------- + +The final state of a flow is determined by its return value. The following rules apply: + +* If an exception is raised directly in the flow function, the flow run is marked as `FAILED`. +* If the flow does not return a value (or returns `None`), its state is determined by the states of all of the tasks and subflows within it. +* If _any_ task run or subflow run failed and none were cancelled, then the final flow run state is marked as `FAILED`. +* If _any_ task run or subflow run was cancelled, then the final flow run state is marked as `CANCELLED`. +* If a flow returns a manually created state, it is used as the state of the final flow run. This allows for manual determination of final state. +* If the flow run returns _any other object_, then it is marked as successfully completed. + +See the [Final state determination](https://docs.prefect.io/concepts/flows/#final-state-determination) section of the [Flows](https://docs.prefect.io/concepts/flows/) documentation for further details and examples. + +State Change Hooks +----------------------------------------------------------- + +State change hooks execute code in response to changes in flow or task run states, enabling you to define actions for specific state transitions in a workflow. + +#### A simple example + +```python +from prefect import flow + +def my_success_hook(flow, flow_run, state): + print("Flow run succeeded!") + +@flow(on_completion=[my_success_hook]) +def my_flow(): + return 42 + +my_flow() + +``` + + +### Create and use hooks + +#### Available state change hooks + + +| Type | Flow | Task | Description | +|-------------------|------|------|------------------------------------------------------------| +| `on_completion` | ✓ | ✓ | Executes when a flow or task run enters a Completed state. | +| `on_failure` | ✓ | ✓ | Executes when a flow or task run enters a Failed state. | +| `on_cancellation` | ✓ | - | Executes when a flow run enters a Cancelling state. | +| `on_crashed` | ✓ | - | Executes when a flow run enters a Crashed state. | +| `on_running` | ✓ | - | Executes when a flow run enters a Running state. | + +#### Create flow run state change hooks + +```python +def my_flow_hook(flow: Flow, flow_run: FlowRun, state: State): + """This is the required signature for a flow run state + change hook. This hook can only be passed into flows. 
+    """
+
+# pass hook as a list of callables
+@flow(on_completion=[my_flow_hook])
+
+```
+
+
+#### Create task run state change hooks
+
+```python
+def my_task_hook(task: Task, task_run: TaskRun, state: State):
+    """This is the required signature for a task run state change
+    hook. This hook can only be passed into tasks.
+    """
+
+# pass hook as a list of callables
+@task(on_failure=[my_task_hook])
+
+```
+
+
+#### Use multiple state change hooks
+
+State change hooks are versatile, allowing you to specify multiple state change hooks for the same state transition, or to use the same state change hook for different transitions:
+
+```python
+def my_success_hook(task, task_run, state):
+    print("Task run succeeded!")
+
+def my_failure_hook(task, task_run, state):
+    print("Task run failed!")
+
+def my_succeed_or_fail_hook(task, task_run, state):
+    print("If the task run succeeds or fails, this hook runs.")
+
+@task(
+    on_completion=[my_success_hook, my_succeed_or_fail_hook],
+    on_failure=[my_failure_hook, my_succeed_or_fail_hook]
+)
+
+```
+
+
+#### Pass `kwargs` to your hooks
+
+The Prefect engine will call your hooks for you upon the state change, passing in the flow, flow run, and state objects.
+
+However, you can define your hook to have additional default arguments:
+
+```python
+from prefect import flow
+
+data = {}
+
+def my_hook(flow, flow_run, state, my_arg="custom_value"):
+    data.update(my_arg=my_arg, state=state)
+
+@flow(on_completion=[my_hook])
+def lazy_flow():
+    pass
+
+state = lazy_flow(return_state=True)
+
+assert data == {"my_arg": "custom_value", "state": state}
+
+```
+
+
+... or define your hook to accept arbitrary keyword arguments:
+
+```python
+from functools import partial
+from prefect import flow, task
+
+data = {}
+
+def my_hook(task, task_run, state, **kwargs):
+    data.update(state=state, **kwargs)
+
+@task
+def bad_task():
+    raise ValueError("meh")
+
+@flow
+def ok_with_failure_flow(x: str = "foo", y: int = 42):
+    bad_task_with_a_hook = bad_task.with_options(
+        on_failure=[partial(my_hook, **dict(x=x, y=y))]
+    )
+    # return a tuple of "bar" and the task run state
+    # to avoid raising the task's exception
+    return "bar", bad_task_with_a_hook(return_state=True)
+
+_, task_run_state = ok_with_failure_flow()
+
+assert data == {"x": "foo", "y": 42, "state": task_run_state}
+
+```
+
+
+### More examples of state change hooks
+
+* [Send a notification when a flow run fails](https://docs.prefect.io/guides/state-change-hooks/#send-a-notification-when-a-flow-run-fails)
+* [Delete a Cloud Run job when a flow crashes](https://docs.prefect.io/guides/state-change-hooks/#delete-a-cloud-run-job-when-a-flow-crashes)
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/task-runners.mdx b/docs/2.19.x/concepts/task-runners.mdx
new file mode 100644
index 000000000000..e03af3436735
--- /dev/null
+++ b/docs/2.19.x/concepts/task-runners.mdx
@@ -0,0 +1,708 @@
+---
+title: Task Runners
+description: Task runners enable you to engage specific executors for Prefect tasks, such as for concurrent, parallel, or distributed execution of tasks.
+---
+
+Task runners are not required for task execution. If you call a task function directly, the task executes as a regular Python function, without a task runner, and produces whatever result is returned by the function.
+
+Task runner overview
+---------------------------------------------------------------
+
+Calling a task function from within a flow, using the default task settings, executes the function sequentially.
Execution of the task function blocks execution of the flow until the task completes. This means, by default, calling multiple tasks in a flow causes them to run in order. + +However, that's not the only way to run tasks! + +You can use the `.submit()` method on a task function to submit the task to a _task runner_. Using a task runner enables you to control whether tasks run sequentially, concurrently, or if you want to take advantage of a parallel or distributed execution library such as Dask or Ray. + +Using the `.submit()` method to submit a task also causes the task run to return a [`PrefectFuture`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture), a Prefect object that contains both any _data_ returned by the task function and a [`State`](https://docs.prefect.io/api-ref/server/schemas/states/), a Prefect object indicating the state of the task run. + +Prefect currently provides the following built-in task runners: + +* [`SequentialTaskRunner`](https://docs.prefect.io/api-ref/prefect/task-runners/#prefect.task_runners.SequentialTaskRunner) can run tasks sequentially. +* [`ConcurrentTaskRunner`](https://docs.prefect.io/api-ref/prefect/task-runners/#prefect.task_runners.ConcurrentTaskRunner) can run tasks concurrently, allowing tasks to switch when blocking on IO. Tasks will be submitted to a thread pool maintained by `anyio`. + +In addition, the following Prefect-developed task runners for parallel or distributed task execution may be installed as [Prefect Integrations](https://docs.prefect.io/integrations/catalog/). + +* [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/) can run tasks requiring parallel execution using [`dask.distributed`](http://distributed.dask.org/). +* [`RayTaskRunner`](https://prefecthq.github.io/prefect-ray/) can run tasks requiring parallel execution using [Ray](https://www.ray.io/). + + +**Concurrency versus parallelism** + + +The words "concurrency" and "parallelism" may sound the same, but they mean different things in computing. + +**Concurrency** refers to a system that can do more than one thing simultaneously, but not at the _exact_ same time. It may be more accurate to think of concurrent execution as non-blocking: within the restrictions of resources available in the execution environment and data dependencies between tasks, execution of one task does not block execution of other tasks in a flow. + +**Parallelism** refers to a system that can do more than one thing at the _exact_ same time. Again, within the restrictions of resources available, parallel execution can run tasks at the same time, such as for operations mapped across a dataset. + + +Using a task runner +------------------------------------------------------------- + +You do not need to specify a task runner for a flow unless your tasks require a specific type of execution. + +To configure your flow to use a specific task runner, import a task runner and assign it as an argument for the flow when the flow is defined. + +**Remember to call `.submit()` when using a task runner** + +Make sure you use `.submit()` to run your task with a task runner. Calling the task directly, without `.submit()`, from within a flow will run the task sequentially instead of using a specified task runner. + + +For example, you can use `ConcurrentTaskRunner` to allow tasks to switch when they would block. 
+ +```python +from prefect import flow, task +from prefect.task_runners import ConcurrentTaskRunner +import time + +@task +def stop_at_floor(floor): + print(f"elevator moving to floor {floor}") + time.sleep(floor) + print(f"elevator stops on floor {floor}") + +@flow(task_runner=ConcurrentTaskRunner()) +def elevator(): + for floor in range(10, 0, -1): + stop_at_floor.submit(floor) + +``` + + +If you specify an uninitialized task runner class, a task runner instance of that type is created with the default settings. You can also pass additional configuration parameters for task runners that accept parameters, such as [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/) and [`RayTaskRunner`](https://prefecthq.github.io/prefect-ray/). + + +**Default task runner** + +If you don't specify a task runner for a flow and you call a task with `.submit()` within the flow, Prefect uses the default `ConcurrentTaskRunner`. + + +Running tasks sequentially +--------------------------------------------------------------------------- + +Sometimes, it's useful to force tasks to run sequentially to make it easier to reason about the behavior of your program. Switching to the `SequentialTaskRunner` will force submitted tasks to run sequentially rather than concurrently. + + +**Synchronous and asynchronous tasks** + +The `SequentialTaskRunner` works with both synchronous and asynchronous task functions. Asynchronous tasks are Python functions defined using `async def` rather than `def`. + + +The following example demonstrates using the `SequentialTaskRunner` to ensure that tasks run sequentially. In the example, the flow `glass_tower` runs the task `stop_at_floor` for floors one through 38, in that order. + +```python +from prefect import flow, task +from prefect.task_runners import SequentialTaskRunner +import random + +@task +def stop_at_floor(floor): + situation = random.choice(["on fire","clear"]) + print(f"elevator stops on {floor} which is {situation}") + +@flow(task_runner=SequentialTaskRunner(), + name="towering-infernflow", + ) +def glass_tower(): + for floor in range(1, 39): + stop_at_floor.submit(floor) + +glass_tower() + +``` + + +Using multiple task runners +----------------------------------------------------------------------------- + +Each flow can only have a single task runner, but sometimes you may want a subset of your tasks to run using a specific task runner. In this case, you can create [subflows](https://docs.prefect.io/concepts/flows/#composing-flows) for tasks that need to use a different task runner. + +For example, you can have a flow (in the example below called `sequential_flow`) that runs its tasks locally using the `SequentialTaskRunner`. If you have some tasks that can run more efficiently in parallel on a Dask cluster, you could create a subflow (such as `dask_subflow`) to run those tasks using the `DaskTaskRunner`. 
+
+```python
+from prefect import flow, task
+from prefect.task_runners import SequentialTaskRunner
+from prefect_dask.task_runners import DaskTaskRunner
+
+@task
+def hello_local():
+    print("Hello!")
+
+@task
+def hello_dask():
+    print("Hello from Dask!")
+
+@flow(task_runner=SequentialTaskRunner())
+def sequential_flow():
+    hello_local.submit()
+    dask_subflow()
+    hello_local.submit()
+
+@flow(task_runner=DaskTaskRunner())
+def dask_subflow():
+    hello_dask.submit()
+
+if __name__ == "__main__":
+    sequential_flow()
+
+```
+
+
+**Guarding main**
+
+Note that you should guard the `main` function by using `if __name__ == "__main__"` to avoid issues with parallel processing.
+
+
+This script outputs the following logs demonstrating the use of the Dask task runner:
+
+```
+20:14:29.785 | INFO    | prefect.engine - Created flow run 'ivory-caiman' for flow 'sequential-flow'
+20:14:29.785 | INFO    | Flow run 'ivory-caiman' - Starting 'SequentialTaskRunner'; submitted tasks will be run sequentially...
+20:14:29.880 | INFO    | Flow run 'ivory-caiman' - Created task run 'hello_local-7633879f-0' for task 'hello_local'
+20:14:29.881 | INFO    | Flow run 'ivory-caiman' - Executing 'hello_local-7633879f-0' immediately...
+Hello!
+20:14:29.904 | INFO    | Task run 'hello_local-7633879f-0' - Finished in state Completed()
+20:14:29.952 | INFO    | Flow run 'ivory-caiman' - Created subflow run 'nimble-sparrow' for flow 'dask-subflow'
+20:14:29.953 | INFO    | prefect.task_runner.dask - Creating a new Dask cluster with `distributed.deploy.local.LocalCluster`
+20:14:31.862 | INFO    | prefect.task_runner.dask - The Dask dashboard is available at http://127.0.0.1:8787/status
+20:14:31.901 | INFO    | Flow run 'nimble-sparrow' - Created task run 'hello_dask-2b96d711-0' for task 'hello_dask'
+20:14:32.370 | INFO    | Flow run 'nimble-sparrow' - Submitted task run 'hello_dask-2b96d711-0' for execution.
+Hello from Dask!
+20:14:33.358 | INFO    | Flow run 'nimble-sparrow' - Finished in state Completed('All states completed.')
+20:14:33.368 | INFO    | Flow run 'ivory-caiman' - Created task run 'hello_local-7633879f-1' for task 'hello_local'
+20:14:33.368 | INFO    | Flow run 'ivory-caiman' - Executing 'hello_local-7633879f-1' immediately...
+Hello!
+20:14:33.386 | INFO    | Task run 'hello_local-7633879f-1' - Finished in state Completed()
+20:14:33.399 | INFO    | Flow run 'ivory-caiman' - Finished in state Completed('All states completed.')
+
+```
+
+
+Using results from submitted tasks
+-------------------------------------------------------------------------------------------
+
+When you use `.submit()` to submit a task to a task runner, the task runner creates a [`PrefectFuture`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture) for access to the state and result of the task.
+
+A `PrefectFuture` is an object that provides access to a computation happening in a task runner — even if that computation is happening on a remote system.
+
+In the following example, we save the return value of calling `.submit()` on the task `say_hello` to the variable `future`, and then we print the type of the variable:
+
+```python
+from prefect import flow, task
+
+@task
+def say_hello(name):
+    return f"Hello {name}!"
+
+@flow
+def hello_world():
+    future = say_hello.submit("Marvin")
+    print(f"variable 'future' is type {type(future)}")
+
+hello_world()
+
+```
+
+
+When you run this code, you'll see that the variable `future` is a `PrefectFuture`:
+
+```
+variable 'future' is type <class 'prefect.futures.PrefectFuture'>
+
+```
+
+
+When you pass a future into a task, Prefect waits for the "upstream" task — the one that the future references — to reach a final state before starting the downstream task.
+
+This means that the downstream task won't receive the `PrefectFuture` you passed as an argument. Instead, the downstream task will receive the value that the upstream task returned.
+
+Take a look at how this works in the following example:
+
+```python
+from prefect import flow, task
+
+@task
+def say_hello(name):
+    return f"Hello {name}!"
+
+@task
+def print_result(result):
+    print(type(result))
+    print(result)
+
+@flow(name="hello-flow")
+def hello_world():
+    future = say_hello.submit("Marvin")
+    print_result.submit(future)
+
+hello_world()
+
+```
+
+
+```
+<class 'str'>
+Hello Marvin!
+
+```
+
+
+Futures have a few useful methods. For example, you can get the return value of the task run with [`.result()`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture.result):
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return 42
+
+@flow
+def my_flow():
+    future = my_task.submit()
+    result = future.result()
+    print(result)
+
+my_flow()
+
+```
+
+
+The `.result()` method will wait for the task to complete before returning the result to the caller. If the task run fails, `.result()` will raise the task run's exception. You may disable this behavior with the `raise_on_failure` option:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    return "I'm a task!"
+
+
+@flow
+def my_flow():
+    future = my_task.submit()
+    result = future.result(raise_on_failure=False)
+    if future.get_state().is_failed():
+        # `result` is an exception! handle accordingly
+        ...
+    else:
+        # `result` is the expected return value of our task
+        ...
+
+```
+
+
+You can retrieve the current state of the task run associated with the `PrefectFuture` using [`.get_state()`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture.get_state):
+
+```python
+@flow
+def my_flow():
+    future = my_task.submit()
+    state = future.get_state()
+
+```
+
+
+You can also wait for a task to complete by using the [`.wait()`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture.wait) method:
+
+```python
+@flow
+def my_flow():
+    future = my_task.submit()
+    final_state = future.wait()
+
+```
+
+
+You can include a timeout in the `wait` call to perform logic if the task has not finished in a given amount of time:
+
+```python
+@flow
+def my_flow():
+    future = my_task.submit()
+    final_state = future.wait(1)  # Wait one second max
+    if final_state:
+        # Take action if the task is done
+        result = final_state.result()
+    else:
+        ...  # Take action if the task is still running
+
+```
+
+
+You may also use the [`wait_for=[]`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.submit) parameter when calling a task, specifying upstream task dependencies. This enables you to control task execution order for tasks that do not share data dependencies.
+ +```python +@task +def task_a(): + pass + +@task +def task_b(): + pass + +@task +def task_c(): + pass + +@task +def task_d(): + pass + +@flow +def my_flow(): + a = task_a.submit() + b = task_b.submit() + # Wait for task_a and task_b to complete + c = task_c.submit(wait_for=[a, b]) + # task_d will wait for task_c to complete + # Note: If waiting for one task it must still be in a list. + d = task_d(wait_for=[c]) + +``` + + +### When to use `.result()` in flows + +The simplest pattern for writing a flow is either only using tasks or only using pure Python functions. When you need to mix the two, use `.result()`. + +Using only tasks: + +```python +from prefect import flow, task + +@task +def say_hello(name): + return f"Hello {name}!" + +@task +def say_nice_to_meet_you(hello_greeting): + return f"{hello_greeting} Nice to meet you :)" + +@flow +def hello_world(): + hello = say_hello.submit("Marvin") + nice_to_meet_you = say_nice_to_meet_you.submit(hello) + +hello_world() + +``` + + +Using only Python functions: + +```python +from prefect import flow, task + +def say_hello(name): + return f"Hello {name}!" + +def say_nice_to_meet_you(hello_greeting): + return f"{hello_greeting} Nice to meet you :)" + +@flow +def hello_world(): + # because this is just a Python function, calls will not be tracked + hello = say_hello("Marvin") + nice_to_meet_you = say_nice_to_meet_you(hello) + +hello_world() + +``` + + +Mixing tasks and Python functions: + +```python +from prefect import flow, task + +def say_hello_extra_nicely_to_marvin(hello): # not a task or flow! + if hello == "Hello Marvin!": + return "HI MARVIN!" + return hello + +@task +def say_hello(name): + return f"Hello {name}!" + +@task +def say_nice_to_meet_you(hello_greeting): + return f"{hello_greeting} Nice to meet you :)" + +@flow +def hello_world(): + # run a task and get the result + hello = say_hello.submit("Marvin").result() + + # not calling a task or flow + special_greeting = say_hello_extra_nicely_to_marvin(hello) + + # pass our modified greeting back into a task + nice_to_meet_you = say_nice_to_meet_you.submit(special_greeting) + + print(nice_to_meet_you.result()) + +hello_world() + +``` + + +Note that `.result()` also limits Prefect's ability to track task dependencies. In the "mixed" example above, Prefect will not be aware that `say_hello` is upstream of `nice_to_meet_you`. + + +**Calling `.result()` is blocking** + + +When calling `.result()`, be mindful your flow function will have to wait until the task run is completed before continuing. + +```python +from prefect import flow, task + +@task +def say_hello(name): + return f"Hello {name}!" + +@task +def do_important_stuff(): + print("Doing lots of important stuff!") + +@flow +def hello_world(): + # blocks until `say_hello` has finished + result = say_hello.submit("Marvin").result() + do_important_stuff.submit() + +hello_world() + +``` + + +Running tasks on Dask +----------------------------------------------------------------- + +The [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/) is a parallel task runner that submits tasks to the [`dask.distributed`](http://distributed.dask.org/) scheduler. By default, a temporary Dask cluster is created for the duration of the flow run. If you already have a Dask cluster running, either local or cloud hosted, you can provide the connection URL via the `address` kwarg. + +1. Make sure the `prefect-dask` collection is installed: `pip install prefect-dask`. +2. In your flow code, import `DaskTaskRunner` from `prefect_dask.task_runners`. 
+3. Assign it as the task runner when the flow is defined using the `task_runner=DaskTaskRunner` argument. + +For example, this flow uses the `DaskTaskRunner` configured to access an existing Dask cluster at `http://my-dask-cluster`. + +```python +from prefect import flow +from prefect_dask.task_runners import DaskTaskRunner + +@flow(task_runner=DaskTaskRunner(address="http://my-dask-cluster")) +def my_flow(): + ... + +``` + + +`DaskTaskRunner` accepts the following optional parameters: + + + +| Parameter | Description | +|------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `address` | Address of a currently running Dask scheduler. | +| `cluster_class` | The cluster class to use when creating a temporary Dask cluster. It can be either the full class name (for example, `"distributed.LocalCluster"`), or the class itself. | +| `cluster_kwargs` | Additional kwargs to pass to the `cluster_class` when creating a temporary Dask cluster. | +| `adapt_kwargs` | Additional kwargs to pass to `cluster.adapt` when creating a temporary Dask cluster. Note that adaptive scaling is only enabled if `adapt_kwargs` are provided. | +| `client_kwargs` | Additional kwargs to use when creating a [`dask.distributed.Client`](https://distributed.dask.org/en/latest/api.html#client). | + + +**Multiprocessing safety** + +Note that, because the `DaskTaskRunner` uses multiprocessing, calls to flows in scripts must be guarded with `if __name__ == "__main__":` or you will encounter warnings and errors. + +If you don't provide the `address` of a Dask scheduler, Prefect creates a temporary local cluster automatically. The number of workers used is based on the number of cores on your machine. The default provides a mix of processes and threads that should work well for most workloads. If you want to specify this explicitly, you can pass values for `n_workers` or `threads_per_worker` to `cluster_kwargs`. + +```python +# Use 4 worker processes, each with 2 threads +DaskTaskRunner( + cluster_kwargs={"n_workers": 4, "threads_per_worker": 2} +) + +``` + + +### Using a temporary cluster + +The `DaskTaskRunner` is capable of creating a temporary cluster using any of [Dask's cluster-manager options](https://docs.dask.org/en/latest/setup.html). This can be useful when you want each flow run to have its own Dask cluster, allowing for per-flow adaptive scaling. + +To configure, you need to provide a `cluster_class`. This can be: + +* A string specifying the import path to the cluster class (for example, `"dask_cloudprovider.aws.FargateCluster"`) +* The cluster class itself +* A function for creating a custom cluster. + +You can also configure `cluster_kwargs`, which takes a dictionary of keyword arguments to pass to `cluster_class` when starting the flow run. + +For example, to configure a flow to use a temporary `dask_cloudprovider.aws.FargateCluster` with 4 workers running with an image named `my-prefect-image`: + +```python +DaskTaskRunner( + cluster_class="dask_cloudprovider.aws.FargateCluster", + cluster_kwargs={"n_workers": 4, "image": "my-prefect-image"}, +) + +``` + + +### Connecting to an existing cluster + +Multiple Prefect flow runs can all use the same existing Dask cluster. You might manage a single long-running Dask cluster (maybe using the Dask [Helm Chart](https://docs.dask.org/en/latest/setup/kubernetes-helm.html)) and configure flows to connect to it during execution. 
This has a few downsides when compared to using a temporary cluster (as described above):
+
+* All workers in the cluster must have dependencies installed for all flows you intend to run.
+* Multiple flow runs may compete for resources. Dask tries to do a good job sharing resources between tasks, but you may still run into issues.
+
+That said, you may prefer managing a single long-running cluster.
+
+To configure a `DaskTaskRunner` to connect to an existing cluster, pass in the address of the scheduler to the `address` argument:
+
+```python
+# Connect to an existing cluster running at a specified address
+DaskTaskRunner(address="tcp://...")
+
+```
+
+
+### Adaptive scaling
+
+One nice feature of using a `DaskTaskRunner` is the ability to scale adaptively to the workload. Instead of specifying `n_workers` as a fixed number, this lets you specify a minimum and maximum number of workers to use, and the Dask cluster will scale up and down as needed.
+
+To do this, you can pass `adapt_kwargs` to `DaskTaskRunner`. This takes the following fields:
+
+* `maximum` (`int` or `None`, optional): the maximum number of workers to scale to. Set to `None` for no maximum.
+* `minimum` (`int` or `None`, optional): the minimum number of workers to scale to. Set to `None` for no minimum.
+
+For example, here we configure a flow to run on a `FargateCluster` scaling up to at most 10 workers.
+
+```python
+DaskTaskRunner(
+    cluster_class="dask_cloudprovider.aws.FargateCluster",
+    adapt_kwargs={"maximum": 10}
+)
+
+```
+
+
+### Dask annotations
+
+Dask annotations can be used to further control the behavior of tasks.
+
+For example, we can set the [priority](http://distributed.dask.org/en/stable/priority.html) of tasks in the Dask scheduler:
+
+```python
+import dask
+from prefect import flow, task
+from prefect_dask.task_runners import DaskTaskRunner
+
+@task
+def show(x):
+    print(x)
+
+
+@flow(task_runner=DaskTaskRunner())
+def my_flow():
+    with dask.annotate(priority=-10):
+        future = show.submit(1)  # low priority task
+
+    with dask.annotate(priority=10):
+        future = show.submit(2)  # high priority task
+
+```
+
+
+Another common use case is [resource](http://distributed.dask.org/en/stable/resources.html) annotations:
+
+```python
+import dask
+from prefect import flow, task
+from prefect_dask.task_runners import DaskTaskRunner
+
+@task
+def show(x):
+    print(x)
+
+# Create a `LocalCluster` with some resource annotations
+# Annotations are abstract in dask and not inferred from your system.
+# Here, we claim that our system has 1 GPU and 1 process available per worker
+@flow(
+    task_runner=DaskTaskRunner(
+        cluster_kwargs={"n_workers": 1, "resources": {"GPU": 1, "process": 1}}
+    )
+)
+def my_flow():
+    with dask.annotate(resources={'GPU': 1}):
+        future = show.submit(0)  # this task requires 1 GPU resource on a worker
+
+    with dask.annotate(resources={'process': 1}):
+        # These tasks each require 1 process on a worker; because we've
+        # specified that our cluster has 1 process per worker and 1 worker,
+        # these tasks will run sequentially
+        future = show.submit(1)
+        future = show.submit(2)
+        future = show.submit(3)
+
+
+if __name__ == "__main__":
+    my_flow()
+
+```
+
+
+Running tasks on Ray
+---------------------------------------------------------------
+
+The [`RayTaskRunner`](https://prefecthq.github.io/prefect-ray/) — installed separately as a [Prefect Collection](https://docs.prefect.io/collections/catalog/) — is a parallel task runner that submits tasks to [Ray](https://www.ray.io/).
By default, a temporary Ray instance is created for the duration of the flow run. If you already have a Ray instance running, you can provide the connection URL via an `address` argument.
+
+
+**Remote storage and Ray tasks**
+
+We recommend configuring [remote storage](https://docs.prefect.io/concepts/storage/) for task execution with the `RayTaskRunner`. This ensures tasks executing in Ray have access to task result storage, particularly when accessing a Ray instance outside of your execution environment.
+
+
+To configure your flow to use the `RayTaskRunner`:
+
+1. Make sure the `prefect-ray` collection is installed: `pip install prefect-ray`.
+2. In your flow code, import `RayTaskRunner` from `prefect_ray.task_runners`.
+3. Assign it as the task runner when the flow is defined using the `task_runner=RayTaskRunner` argument.
+
+For example, this flow uses the `RayTaskRunner` configured to access an existing Ray instance at `ray://192.0.2.255:8786`.
+
+```python
+from prefect import flow
+from prefect_ray.task_runners import RayTaskRunner
+
+@flow(task_runner=RayTaskRunner(address="ray://192.0.2.255:8786"))
+def my_flow():
+    ...
+
+```
+
+
+`RayTaskRunner` accepts the following optional parameters:
+
+
+| Parameter     | Description                                                                    |
+|---------------|--------------------------------------------------------------------------------|
+| `address`     | Address of a currently running Ray instance, starting with the `ray://` URI.  |
+| `init_kwargs` | Additional kwargs to use when calling `ray.init`.                              |
+
+
+Note that Ray Client uses the [ray://](https://docs.ray.io/en/master/cluster/ray-client.html) URI to indicate the address of a Ray instance. If you don't provide the `address` of a Ray instance, Prefect creates a temporary instance automatically.
+
+**Ray environment limitations**
+
+
+While we're excited about adding support for parallel task execution via Ray to Prefect, there are some inherent limitations with Ray you should be aware of:
+
+Ray's support for Python 3.11 is [experimental](https://docs.ray.io/en/latest/ray-overview/installation.html#install-nightlies).
+
+Ray support for non-x86/64 architectures such as ARM/M1 processors is not available with installation from `pip` alone and will be skipped during installation of Prefect. It is possible to manually install the blocking component with `conda`. See the [Ray documentation](https://docs.ray.io/en/latest/ray-overview/installation.html#m1-mac-apple-silicon-support) for instructions.
+
+See the [Ray installation documentation](https://docs.ray.io/en/latest/ray-overview/installation.html) for further compatibility information.
\ No newline at end of file
diff --git a/docs/2.19.x/concepts/tasks.mdx b/docs/2.19.x/concepts/tasks.mdx
new file mode 100644
index 000000000000..50d31cc72498
--- /dev/null
+++ b/docs/2.19.x/concepts/tasks.mdx
@@ -0,0 +1,760 @@
+---
+title: Tasks
+description: A task is a function that represents a discrete unit of work in a Prefect workflow. Tasks are not required — you may define Prefect workflows that consist only of flows, using regular Python statements and functions. Tasks enable you to encapsulate elements of your workflow logic in observable units that can be reused across flows and subflows.
+---
+
+
+Tasks overview
+---------------------------------------------------
+
+Tasks are functions: they can take inputs, perform work, and return an output. A Prefect task can do almost anything a Python function can do.
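+
+For instance, here is a minimal sketch of a task that takes an input and returns an output, just like an ordinary function:
+
+```python
+from prefect import flow, task
+
+@task
+def add_one(x: int) -> int:
+    # performs work and returns a value, like any Python function
+    return x + 1
+
+@flow
+def my_flow():
+    print(add_one(1))  # prints 2
+
+```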
+
+Tasks are special because they receive metadata about upstream dependencies and the state of those dependencies before they run, even if they don't receive any explicit data inputs from them. This gives you the opportunity to, for example, have a task wait on the completion of another task before executing.
+
+Tasks also take advantage of automatic Prefect [logging](https://docs.prefect.io/concepts/logs/) to capture details about task runs such as runtime, tags, and final state.
+
+You can define your tasks within the same file as your flow definition, or you can define tasks within modules and import them for use in your flow definitions. Tasks may be called from within a flow, from within a subflow, or (as of `prefect 2.18.x`) from within another task.
+
+**Calling a task from a flow**
+
+Use the `@task` decorator to designate a function as a task. Calling the task creates a new task run:
+
+```python
+from prefect import flow, task
+
+@task
+def my_task():
+    print("Hello, I'm a task")
+
+@flow
+def my_flow():
+    my_task()
+
+```
+
+
+**Calling a task from another task**
+
+As of `prefect 2.18.x`, you can call a task from within another task:
+
+```python
+from prefect import task
+
+@task
+def my_task():
+    print("Hello, I'm a task")
+
+@task(log_prints=True)
+def my_parent_task():
+    my_task()
+
+```
+
+
+Tasks are uniquely identified by a task key, which is a hash composed of the task name, the fully-qualified name of the function, and any tags. If the task does not have a name specified, the name is derived from the task function.
+
+**How big should a task be?**
+
+Prefect encourages "small tasks" — each one should represent a single logical step of your workflow. This allows Prefect to better contain task failures.
+
+To be clear, there's nothing stopping you from putting all of your code in a single task — Prefect will happily run it! However, if any line of code fails, the entire task will fail and must be retried from the beginning. This can be avoided by splitting the code into multiple dependent tasks.
+
+Task arguments
+---------------------------------------------------
+
+Tasks allow for customization through optional arguments:
+
+
+| Argument              | Description                                                                                                                                                                                                               |
+|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `name`                | An optional name for the task. If not provided, the name will be inferred from the function name.                                                                                                                         |
+| `description`         | An optional string description for the task. If not provided, the description will be pulled from the docstring for the decorated function.                                                                               |
+| `tags`                | An optional set of tags to be associated with runs of this task. These tags are combined with any tags defined by a `prefect.tags` context at task runtime.                                                               |
+| `cache_key_fn`        | An optional callable that, given the task run context and call parameters, generates a string key. If the key matches a previous completed state, that state result will be restored instead of running the task again.   |
+| `cache_expiration`    | An optional amount of time indicating how long cached states for this task should be restorable; if not provided, cached states will never expire.                                                                        |
+| `retries`             | An optional number of times to retry on task run failure.                                                                                                                                                                 |
+| `retry_delay_seconds` | An optional number of seconds to wait before retrying the task after failure. This is only applicable if retries is nonzero.                                                                                              |
+| `log_prints`          | An optional boolean indicating whether to log print statements.                                                                                                                                                           |
+| `persist_result`      | An optional boolean indicating whether to persist the result of the task run to storage.                                                                                                                                  |
+
+See all possible parameters in the [Python SDK API docs](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.task).
+
+For example, you can provide a `name` value for the task. Here we've used the optional `description` argument as well.
+
+```python
+@task(name="hello-task",
+      description="This task says hello.")
+def my_task():
+    print("Hello, I'm a task")
+
+```
+
+
+You can distinguish runs of this task by providing a `task_run_name`; this setting accepts a string that can optionally contain templated references to the keyword arguments of your task. The name is formatted using Python's standard string formatting syntax, as shown here:
+
+```python
+import datetime
+from prefect import flow, task
+
+@task(name="My Example Task",
+      description="An example task for a tutorial.",
+      task_run_name="hello-{name}-on-{date:%A}")
+def my_task(name, date):
+    pass
+
+@flow
+def my_flow():
+    # creates a run with a name like "hello-marvin-on-Thursday"
+    my_task(name="marvin", date=datetime.datetime.now(datetime.timezone.utc))
+
+```
+
+
+This setting also accepts a function that returns a string to be used for the task run name:
+
+```python
+import datetime
+from prefect import flow, task
+
+def generate_task_name():
+    date = datetime.datetime.now(datetime.timezone.utc)
+    return f"{date:%A}-is-a-lovely-day"
+
+@task(name="My Example Task",
+      description="An example task for a tutorial.",
+      task_run_name=generate_task_name)
+def my_task(name):
+    pass
+
+@flow
+def my_flow():
+    # creates a run with a name like "Thursday-is-a-lovely-day"
+    my_task(name="marvin")
+
+```
+
+
+If you need access to information about the task, use the `prefect.runtime` module. For example:
+
+```python
+from prefect import flow, task
+from prefect.runtime import flow_run, task_run
+
+def generate_task_name():
+    flow_name = flow_run.flow_name
+    task_name = task_run.task_name
+
+    parameters = task_run.parameters
+    name = parameters["name"]
+    limit = parameters["limit"]
+
+    return f"{flow_name}-{task_name}-with-{name}-and-{limit}"
+
+@task(name="my-example-task",
+      description="An example task for a tutorial.",
+      task_run_name=generate_task_name)
+def my_task(name: str, limit: int = 100):
+    pass
+
+@flow
+def my_flow(name: str):
+    # creates a run with a name like "my-flow-my-example-task-with-marvin-and-100"
+    my_task(name="marvin")
+
+```
+
+
+Tags are optional string labels that enable you to identify and group tasks other than by name or flow. Tags are useful for:
+
+* Filtering task runs by tag in the UI and via the [Prefect REST API](https://docs.prefect.io/api-ref/rest-api/#filtering).
+* Setting [concurrency limits](#task-run-concurrency-limits) on task runs by tag.
+
+Tags may be specified as a keyword argument on the [task decorator](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.task).
+
+```python
+@task(name="hello-task", tags=["test"])
+def my_task():
+    print("Hello, I'm a task")
+
+```
+
+
+You can also provide tags as an argument with a [`tags` context manager](https://docs.prefect.io/api-ref/prefect/context/#prefect.context.tags), specifying tags when the task is called rather than in its definition.
+
+```python
+from prefect import flow, task
+from prefect import tags
+
+@task
+def my_task():
+    print("Hello, I'm a task")
+
+@flow
+def my_flow():
+    with tags("test"):
+        my_task()
+
+```
+
+
+Retries
+-------------------------------------
+
+Prefect can automatically retry tasks on failure. In Prefect, a task _fails_ if its Python function raises an exception.
+
+To enable retries, pass `retries` and `retry_delay_seconds` parameters to your task. If the task fails, Prefect will retry it up to `retries` times, waiting `retry_delay_seconds` seconds between each attempt. If the task still fails after the final retry, Prefect marks the task run as _failed_.
+
+**Retries don't create new task runs**
+
+A new task run is not created when a task is retried. A new state is added to the state history of the original task run.
+
+### A real-world example: making an API request
+
+Consider the real-world problem of making an API request. In this example, we'll use the [`httpx`](https://www.python-httpx.org/) library to make an HTTP request.
+
+```python
+import httpx
+
+from prefect import flow, task
+
+
+@task(retries=2, retry_delay_seconds=5)
+def get_data_task(
+    url: str = "https://api.brittle-service.com/endpoint"
+) -> dict:
+    response = httpx.get(url)
+
+    # If the response status code is anything but a 2xx, httpx will raise
+    # an exception. This task doesn't handle the exception, so Prefect will
+    # catch the exception and will consider the task run failed.
+    response.raise_for_status()
+
+    return response.json()
+
+
+@flow
+def get_data_flow():
+    get_data_task()
+
+```
+
+
+In this task, if the HTTP request to the brittle API receives any status code other than a 2xx (200, 201, etc.), Prefect will retry the task a maximum of two times, waiting five seconds in between retries.
+
+### Custom retry behavior
+
+The `retry_delay_seconds` option accepts a list of delays for more custom retry behavior. The following task will wait for successively increasing intervals of 1, 10, and 100 seconds, respectively, before the next attempt starts:
+
+```python
+from prefect import task
+
+@task(retries=3, retry_delay_seconds=[1, 10, 100])
+def some_task_with_manual_backoff_retries():
+    ...
+
+```
+
+
+The `retry_condition_fn` option accepts a callable that returns a boolean. If the callable returns `True`, the task will be retried. If the callable returns `False`, the task will not be retried. The callable accepts three arguments — the task, the task run, and the state of the task run.
The following task will retry on HTTP status codes other than 401 or 404:
+
+```python
+import httpx
+from prefect import flow, task
+
+def retry_handler(task, task_run, state) -> bool:
+    """This is a custom retry handler to handle when we want to retry a task"""
+    try:
+        # Attempt to get the result of the task
+        state.result()
+    except httpx.HTTPStatusError as exc:
+        # Retry on any HTTP status code that is not 401 or 404
+        do_not_retry_on_these_codes = [401, 404]
+        return exc.response.status_code not in do_not_retry_on_these_codes
+    except httpx.ConnectError:
+        # Do not retry
+        return False
+    except Exception:
+        # For any other exception, retry
+        return True
+
+@task(retries=1, retry_condition_fn=retry_handler)
+def my_api_call_task(url):
+    response = httpx.get(url)
+    response.raise_for_status()
+    return response.json()
+
+@flow
+def get_data_flow(url):
+    my_api_call_task(url=url)
+
+if __name__ == "__main__":
+    get_data_flow(url="https://httpbin.org/status/503")
+
+```
+
+
+Additionally, you can pass a callable that accepts the number of retries as an argument and returns a list. Prefect includes an [`exponential_backoff`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.exponential_backoff) utility that will automatically generate a list of retry delays that correspond to an exponential backoff retry strategy. The following task will wait for 10, 20, then 40 seconds before each retry.
+
+```python
+from prefect import task
+from prefect.tasks import exponential_backoff
+
+@task(retries=3, retry_delay_seconds=exponential_backoff(backoff_factor=10))
+def some_task_with_exponential_backoff_retries():
+    ...
+
+```
+
+
+#### Advanced topic: adding "jitter"
+
+While using exponential backoff, you may also want to add _jitter_ to the delay times. Jitter is a random amount of time added to retry periods that helps prevent "thundering herd" scenarios, which is when many tasks all retry at the exact same time, potentially overwhelming systems.
+
+The `retry_jitter_factor` option can be used to add variance to the base delay. For example, a retry delay of 10 seconds with a `retry_jitter_factor` of 0.5 will be allowed to delay up to 15 seconds. Large values of `retry_jitter_factor` provide more protection against "thundering herds," while keeping the average retry delay time constant. For example, the following task adds jitter to its exponential backoff so the retry delays will vary up to a maximum delay time of 20, 40, and 80 seconds respectively.
+
+```python
+from prefect import task
+from prefect.tasks import exponential_backoff
+
+@task(
+    retries=3,
+    retry_delay_seconds=exponential_backoff(backoff_factor=10),
+    retry_jitter_factor=1,
+)
+def some_task_with_exponential_backoff_retries():
+    ...
+
+```
+
+
+### Configuring retry behavior globally with settings
+
+You can also set retries and retry delays by using the following global settings. These settings will not override the `retries` or `retry_delay_seconds` that are set in the flow or task decorator.
+
+```
+prefect config set PREFECT_FLOW_DEFAULT_RETRIES=2
+prefect config set PREFECT_TASK_DEFAULT_RETRIES=2
+prefect config set PREFECT_FLOW_DEFAULT_RETRY_DELAY_SECONDS="[1, 10, 100]"
+prefect config set PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS="[1, 10, 100]"
+
+```
+
+
+Caching
+-------------------------------------
+
+Caching refers to the ability of a task run to reflect a finished state without actually running the code that defines the task.
This allows you to efficiently reuse results of tasks that may be expensive to run with every flow run, or reuse cached results if the inputs to a task have not changed. + +To determine whether a task run should retrieve a cached state, we use "cache keys". A cache key is a string value that indicates if one run should be considered identical to another. When a task run with a cache key finishes, we attach that cache key to the state. When each task run starts, Prefect checks for states with a matching cache key. If a state with an identical key is found, Prefect will use the cached state instead of running the task again. + +To enable caching, specify a `cache_key_fn` — a function that returns a cache key — on your task. You may optionally provide a `cache_expiration` timedelta indicating when the cache expires. If you do not specify a `cache_expiration`, the cache key does not expire. + +You can define a task that is cached based on its inputs by using the Prefect `task_input_hash`. This is a task cache key implementation that hashes all inputs to the task using a JSON or cloudpickle serializer. If the task inputs do not change, the cached results are used rather than running the task until the cache expires. + +Note that, if any arguments are not JSON serializable, the pickle serializer is used as a fallback. If cloudpickle fails, `task_input_hash` returns a null key indicating that a cache key could not be generated for the given inputs. + +In this example, until the `cache_expiration` time ends, as long as the input to `hello_task()` remains the same when it is called, the cached return value is returned. In this situation the task is not rerun. However, if the input argument value changes, `hello_task()` runs using the new input. + +```python +from datetime import timedelta +from prefect import flow, task +from prefect.tasks import task_input_hash + +@task(cache_key_fn=task_input_hash, cache_expiration=timedelta(days=1)) +def hello_task(name_input): + # Doing some work + print("Saying hello") + return "hello " + name_input + +@flow(log_prints=True) +def hello_flow(name_input): + hello_task(name_input) + +``` + + +Alternatively, you can provide your own function or other callable that returns a string cache key. A generic `cache_key_fn` is a function that accepts two positional arguments: + +* The first argument corresponds to the `TaskRunContext`, which stores task run metadata in the attributes `task_run_id`, `flow_run_id`, and `task`. +* The second argument corresponds to a dictionary of input values to the task. For example, if your task is defined with signature `fn(x, y, z)` then the dictionary will have keys `"x"`, `"y"`, and `"z"` with corresponding values that can be used to compute your cache key. + +Note that the `cache_key_fn` is _not_ defined as a `@task`. + +**Task cache keys** + +By default, a task cache key is limited to 2000 characters, specified by the `PREFECT_API_TASK_CACHE_KEY_MAX_LENGTH` setting. + + +```python +from prefect import task, flow + +def static_cache_key(context, parameters): + # return a constant + return "static cache key" + +@task(cache_key_fn=static_cache_key) +def cached_task(): + print('running an expensive operation') + return 42 + +@flow +def test_caching(): + cached_task() + cached_task() + cached_task() + +``` + + +In this case, there's no expiration for the cache key, and no logic to change the cache key, so `cached_task()` only runs once. 
+
+```python
+>>> test_caching()
+running an expensive operation
+>>> test_caching()
+>>> test_caching()
+
+```
+
+
+When each task run requested to enter a `Running` state, it provided its cache key computed from the `cache_key_fn`. The Prefect backend identified that there was a COMPLETED state associated with this key and instructed the run to immediately enter the same COMPLETED state, including the same return values.
+
+A real-world example might include the flow run ID from the context in the cache key so only repeated calls in the same flow run are cached.
+
+```python
+from prefect import task
+from prefect.tasks import task_input_hash
+
+def cache_within_flow_run(context, parameters):
+    return f"{context.task_run.flow_run_id}-{task_input_hash(context, parameters)}"
+
+@task(cache_key_fn=cache_within_flow_run)
+def cached_task():
+    print('running an expensive operation')
+    return 42
+
+```
+
+
+**Task results, retries, and caching**
+
+Task results are cached in memory during a flow run and persisted to the location specified by the `PREFECT_LOCAL_STORAGE_PATH` setting. As a result, task caching between flow runs is currently limited to flow runs with access to that local storage path.
+
+### Refreshing the cache
+
+Sometimes, you want a task to update the data associated with its cache key instead of using the cache. This is a cache "refresh".
+
+The `refresh_cache` option can be used to enable this behavior for a specific task:
+
+```python
+import random
+
+from prefect import task
+
+
+def static_cache_key(context, parameters):
+    # return a constant
+    return "static cache key"
+
+
+@task(cache_key_fn=static_cache_key, refresh_cache=True)
+def caching_task():
+    return random.random()
+
+```
+
+
+When this task runs, it will _always_ update the cache key instead of using the cached value. This is particularly useful when you have a flow that is responsible for updating the cache.
+
+If you want to refresh the cache for all tasks, you can use the `PREFECT_TASKS_REFRESH_CACHE` setting. Setting `PREFECT_TASKS_REFRESH_CACHE=true` will change the default behavior of all tasks to refresh. This is particularly useful if you want to rerun a flow without cached results.
+
+If you have tasks that should not refresh when this setting is enabled, you may explicitly set `refresh_cache` to `False`. These tasks will never refresh the cache — if a cache key exists it will be read, not updated. Note that, if a cache key does _not_ exist yet, these tasks can still write to the cache.
+
+```python
+@task(cache_key_fn=static_cache_key, refresh_cache=False)
+def caching_task():
+    return random.random()
+
+```
+
+
+Timeouts
+---------------------------------------
+
+Task timeouts are used to prevent unintentional long-running tasks. When the duration of execution for a task exceeds the duration specified in the timeout, a timeout exception will be raised and the task will be marked as failed. In the UI, the task will be visibly designated as `TimedOut`. From the perspective of the flow, the timed-out task will be treated like any other failed task.
+
+Timeout durations are specified using the `timeout_seconds` keyword argument.
+
+```python
+from prefect import task
+import time
+
+@task(timeout_seconds=1, log_prints=True)
+def show_timeouts():
+    print("I will execute")
+    time.sleep(5)
+    print("I will not execute")
+
+```
+
+
+Task results
+-----------------------------------------------
+
+Depending on how you call tasks, they can return different types of results and optionally engage the use of a [task runner](https://docs.prefect.io/concepts/task-runners/).
+
+Any task can return:
+
+* Data, such as `int`, `str`, `dict`, `list`, and so on — this is the default behavior any time you call `your_task()`.
+* [`PrefectFuture`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture) — this is achieved by calling [`your_task.submit()`](https://docs.prefect.io/concepts/task-runners/#using-a-task-runner). A `PrefectFuture` contains both _data_ and a _State_.
+* Prefect [`State`](https://docs.prefect.io/api-ref/server/schemas/states/) — anytime you call your task or flow with the argument `return_state=True`, it will directly return a state you can use to build custom behavior based on a state change you care about, such as the task or flow failing or retrying.
+
+To run your task with a [task runner](https://docs.prefect.io/concepts/task-runners/), you must call the task with `.submit()`.
+
+See [state returned values](https://docs.prefect.io/concepts/task-runners/#using-results-from-submitted-tasks) for examples.
+
+**Task runners are optional**
+
+If you just need the result from a task, you can simply call the task from your flow. For most workflows, the default behavior of calling a task directly and receiving a result is all you'll need.
+
+Wait for
+---------------------------------------
+
+To create a dependency between two tasks that do not exchange data, but one needs to wait for the other to finish, use the special [`wait_for`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.submit) keyword argument:
+
+```python
+@task
+def task_1():
+    pass
+
+@task
+def task_2():
+    pass
+
+@flow
+def my_flow():
+    x = task_1()
+
+    # task 2 will wait for task_1 to complete
+    y = task_2(wait_for=[x])
+
+```
+
+
+Map
+-----------------------------
+
+Prefect provides a `.map()` implementation that automatically creates a task run for each element of its input data. Mapped tasks represent the computations of many individual child tasks.
+
+The simplest Prefect map takes a task and applies it to each element of its inputs.
+
+```python
+from prefect import flow, task
+
+@task
+def print_nums(nums):
+    for n in nums:
+        print(n)
+
+@task
+def square_num(num):
+    return num**2
+
+@flow
+def map_flow(nums):
+    print_nums(nums)
+    squared_nums = square_num.map(nums)
+    print_nums(squared_nums)
+
+map_flow([1,2,3,5,8,13])
+
+```
+
+
+Prefect also supports `unmapped` arguments, allowing you to pass static values that don't get mapped over.
+
+```python
+from prefect import flow, task
+
+@task
+def add_together(x, y):
+    return x + y
+
+@flow
+def sum_it(numbers, static_value):
+    futures = add_together.map(numbers, static_value)
+    return futures
+
+sum_it([1, 2, 3], 5)
+
+```
+
+
+If your static argument is an iterable, you'll need to wrap it with `unmapped` to tell Prefect that it should be treated as a static value.
+
+```python
+from prefect import flow, task, unmapped
+
+@task
+def sum_plus(x, static_iterable):
+    return x + sum(static_iterable)
+
+@flow
+def sum_it(numbers, static_iterable):
+    futures = sum_plus.map(numbers, static_iterable)
+    return futures
+
+sum_it([4, 5, 6], unmapped([1, 2, 3]))
+
+```
+
+
+Async tasks
+---------------------------------------------
+
+Prefect also supports asynchronous task and flow definitions by default.
All of [the standard rules of async](https://docs.python.org/3/library/asyncio-task.html) apply: + +```python +import asyncio + +from prefect import task, flow + +@task +async def print_values(values): + for value in values: + await asyncio.sleep(1) # yield + print(value, end=" ") + +@flow +async def async_flow(): + await print_values([1, 2]) # runs immediately + coros = [print_values("abcd"), print_values("6789")] + + # asynchronously gather the tasks + await asyncio.gather(*coros) + +asyncio.run(async_flow()) + +``` + + +Note, if you are not using `asyncio.gather`, calling [`.submit()`](https://docs.prefect.io/concepts/task-runners/#using-a-task-runner) is required for asynchronous execution on the `ConcurrentTaskRunner`. + +Task run concurrency limits +----------------------------------------------------------------------------- + +There are situations in which you want to actively prevent too many tasks from running simultaneously. For example, if many tasks across multiple flows are designed to interact with a database that only allows 10 connections, you want to make sure that no more than 10 tasks that connect to this database are running at any given time. + +Prefect has built-in functionality for achieving this: task concurrency limits. + +Task concurrency limits use [task tags](#tags). You can specify an optional concurrency limit as the maximum number of concurrent task runs in a `Running` state for tasks with a given tag. The specified concurrency limit applies to any task to which the tag is applied. + +If a task has multiple tags, it will run only if _all_ tags have available concurrency. + +Tags without explicit limits are considered to have unlimited concurrency. + +**0 concurrency limit aborts task runs** + +Currently, if the concurrency limit is set to 0 for a tag, any attempt to run a task with that tag will be aborted instead of delayed. + +### Execution behavior + +Task tag limits are checked whenever a task run attempts to enter a [`Running` state](https://docs.prefect.io/concepts/states/). + +If there are no concurrency slots available for any one of your task's tags, the transition to a `Running` state will be delayed and the client is instructed to try entering a `Running` state again in 30 seconds (or the value specified by the `PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS` setting). + +### Configuring concurrency limits + + +**Flow run concurrency limits are set at a work pool and/or work queue level** + +While task run concurrency limits are configured via tags (as shown below), [flow run concurrency limits](https://docs.prefect.io/latest/concepts/work-pools/#work-pool-concurrency) are configured via work pools and/or work queues. + +You can set concurrency limits on as few or as many tags as you wish. You can set limits through: + +* Prefect [CLI](#cli) +* Prefect API by using `PrefectClient` [Python client](#python-client) +* Prefect server UI or Prefect Cloud + +#### CLI + +You can create, list, and remove concurrency limits by using Prefect CLI `concurrency-limit` commands. + +``` +prefect concurrency-limit [command] [arguments] + +``` + + + +|Command|Description | +|-------|----------------------------------------------------------------| +|create |Create a concurrency limit by specifying a tag and limit. | +|delete |Delete the concurrency limit set on the specified tag. | +|inspect|View details about a concurrency limit set on the specified tag.| +|ls |View all defined concurrency limits. 
|
+
+For example, to set a concurrency limit of 10 on the 'small\_instance' tag:
+
+```
+prefect concurrency-limit create small_instance 10
+
+```
+
+
+To delete the concurrency limit on the 'small\_instance' tag:
+
+```
+prefect concurrency-limit delete small_instance
+
+```
+
+
+To view details about the concurrency limit on the 'small\_instance' tag:
+
+```
+prefect concurrency-limit inspect small_instance
+
+```
+
+
+#### Python client
+
+To update your tag concurrency limits programmatically, use [`PrefectClient.create_concurrency_limit`](https://docs.prefect.io/2.19.1/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.create_concurrency_limit).
+
+`create_concurrency_limit` takes two arguments:
+
+* `tag` specifies the task tag on which you're setting a limit.
+* `concurrency_limit` specifies the maximum number of concurrent task runs for that tag.
+
+For example, to set a concurrency limit of 10 on the 'small\_instance' tag:
+
+```python
+from prefect import get_client
+
+async with get_client() as client:
+    # set a concurrency limit of 10 on the 'small_instance' tag
+    limit_id = await client.create_concurrency_limit(
+        tag="small_instance",
+        concurrency_limit=10
+    )
+
+```
+
+
+To remove all concurrency limits on a tag, use [`PrefectClient.delete_concurrency_limit_by_tag`](https://docs.prefect.io/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.delete_concurrency_limit_by_tag/), passing the tag:
+
+```python
+async with get_client() as client:
+    # remove a concurrency limit on the 'small_instance' tag
+    await client.delete_concurrency_limit_by_tag(tag="small_instance")
+
+```
+
+
+If you wish to query for the currently set limit on a tag, use [`PrefectClient.read_concurrency_limit_by_tag`](https://docs.prefect.io/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.read_concurrency_limit_by_tag), passing the tag:
+
+```python
+async with get_client() as client:
+    # query the concurrency limit on the 'small_instance' tag
+    limit = await client.read_concurrency_limit_by_tag(tag="small_instance")
+
+```
+
+
+To see _all_ of your limits across all of your tags, use [`PrefectClient.read_concurrency_limits`](https://docs.prefect.io/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient.read_concurrency_limits).
diff --git a/docs/2.19.x/concepts/work-pools--workers.mdx b/docs/2.19.x/concepts/work-pools--workers.mdx
new file mode 100644
index 000000000000..fc7572129b76
--- /dev/null
+++ b/docs/2.19.x/concepts/work-pools--workers.mdx
@@ -0,0 +1,579 @@
+---
+title: Work Pools & Workers
+---
+
+Work pools and workers bridge the Prefect _orchestration environment_ with your _execution environment_. When a [deployment](https://docs.prefect.io/concepts/deployments/) creates a flow run, it is submitted to a specific work pool for scheduling. A worker running in the execution environment can poll its respective work pool for new runs to execute, or the work pool can submit flow runs to serverless infrastructure directly, depending on your configuration.
+
+Work pool overview
+-----------------------------------------------------------
+
+Work pools organize work for execution. Work pools have types corresponding to the infrastructure that will execute the flow code, as well as the delivery method of work to that environment.
Pull work pools require [workers](#worker-overview) (or less ideally, [agents](https://docs.prefect.io/concepts/agents)) to poll the work pool for flow runs to execute. [Push work pools](https://docs.prefect.io/guides/deployment/push-work-pools) can submit runs directly to your serverless infrastructure providers such as Google Cloud Run, Azure Container Instances, and AWS ECS without the need for an agent or worker. [Managed work pools](https://docs.prefect.io/guides/managed-execution) are administered by Prefect and handle the submission and execution of code on your behalf. + + +**Work pools are like pub/sub topics** + +It's helpful to think of work pools as a way to coordinate (potentially many) deployments with (potentially many) workers through a known channel: the pool itself. This is similar to how "topics" are used to connect producers and consumers in a pub/sub or message-based system. By switching a deployment's work pool, users can quickly change the worker that will execute their runs, making it easy to promote runs through environments or even debug locally. + +In addition, users can control aspects of work pool behavior, such as how many runs the pool allows to be run concurrently or pausing delivery entirely. These options can be modified at any time, and any workers requesting work for a specific pool will only see matching flow runs. + +### Work pool configuration + +You can configure work pools by using any of the following: + +* Prefect UI +* Prefect CLI commands +* [Prefect REST API](https://docs.prefect.io/api-ref/rest-api/) +* [Terraform provider for Prefect Cloud](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/resources/work_pool) + +To manage work pools in the UI, click the **Work Pools** icon. This displays a list of currently configured work pools. + +![The UI displays a list of configured work pools](/images/work-pools-workers1.png) + +You can pause a work pool from this page by using the toggle. + +Select the **+** button to create a new work pool. You'll be able to specify the details for work served by this work pool. + +To create a work pool via the Prefect CLI, use the `prefect work-pool create` command: + +``` +prefect work-pool create [OPTIONS] NAME + +``` + + +`NAME` is a required, unique name for the work pool. + +Optional configuration parameters you can specify to filter work on the pool include: + + + +| Option | Description | +|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--paused` | If provided, the work pool will be created in a paused state. | +| `--type` | The type of infrastructure that can execute runs from this work pool. | +| `--set-as-default` | Whether to use the created work pool as the local default for deployment. | +| `--base-job-template` | The path to a JSON file containing the base job template to use. If unspecified, Prefect will use the default base job template for the given worker type. 
|
+
+For example, to create a work pool called `test-pool`, you would run this command:
+
+```
+prefect work-pool create test-pool
+
+```
+
+
+### Work pool types
+
+If you don't use the `--type` flag to specify an infrastructure type, you are prompted to select from the following options:
+
+
+| Infrastructure Type                  | Description                                                                                                                                                                                                                                     |
+|--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Prefect Agent                        | Execute flow runs on heterogeneous infrastructure using infrastructure blocks.                                                                                                                                                                  |
+| Process                              | Execute flow runs as subprocesses on a worker. Works well for local execution when first getting started.                                                                                                                                       |
+| AWS Elastic Container Service        | Execute flow runs within containers on AWS ECS. Works with EC2 and Fargate clusters. Requires an AWS account.                                                                                                                                   |
+| Azure Container Instances            | Execute flow runs within containers on Azure's Container Instances service. Requires an Azure account.                                                                                                                                          |
+| Docker                               | Execute flow runs within Docker containers. Works well for managing flow execution environments via Docker images. Requires access to a running Docker daemon.                                                                                  |
+| Google Cloud Run                     | Execute flow runs within containers on Google Cloud Run. Requires a Google Cloud Platform account.                                                                                                                                              |
+| Google Cloud Run V2                  | Execute flow runs within containers on Google Cloud Run (V2 API). Requires a Google Cloud Platform account.                                                                                                                                     |
+| Google Vertex AI                     | Execute flow runs within containers on Google Vertex AI. Requires a Google Cloud Platform account.                                                                                                                                              |
+| Kubernetes                           | Execute flow runs within jobs scheduled on a Kubernetes cluster. Requires a Kubernetes cluster.                                                                                                                                                 |
+| Google Cloud Run - Push              | Execute flow runs within containers on Google Cloud Run. Requires a Google Cloud Platform account. Flow runs are pushed directly to your environment, without the need for a Prefect worker.                                                    |
+| AWS Elastic Container Service - Push | Execute flow runs within containers on AWS ECS. Works with existing ECS clusters and serverless execution via AWS Fargate. Requires an AWS account. Flow runs are pushed directly to your environment, without the need for a Prefect worker.   |
+| Azure Container Instances - Push     | Execute flow runs within containers on Azure's Container Instances service. Requires an Azure account. Flow runs are pushed directly to your environment, without the need for a Prefect worker.                                                |
+| Modal - Push                         | Execute flow runs on Modal. Requires a Modal account. Flow runs are pushed directly to your Modal workspace, without the need for a Prefect worker.                                                                                             |
+| Prefect Managed                      | Execute flow runs within containers on Prefect managed infrastructure.                                                                                                                                                                          |
| +| Docker | Execute flow runs within Docker containers. Works well for managing flow execution environments via Docker images. Requires access to a running Docker daemon. | +| Google Cloud Run | Execute flow runs within containers on Google Cloud Run. Requires a Google Cloud Platform account. | +| Google Cloud Run V2 | Execute flow runs within containers on Google Cloud Run (V2 API). Requires a Google Cloud Platform account. | +| Google Vertex AI | Execute flow runs within containers on Google Vertex AI. Requires a Google Cloud Platform account. | +| Kubernetes | Execute flow runs within jobs scheduled on a Kubernetes cluster. Requires a Kubernetes cluster. | + + + + + + + +On success, the command returns the details of the newly created work pool. + +``` +Created work pool with properties: + name - 'test-pool' + id - a51adf8c-58bb-4949-abe6-1b87af46eabd + concurrency limit - None + +Start a worker to pick up flows from the work pool: + prefect worker start -p 'test-pool' + +Inspect the work pool: + prefect work-pool inspect 'test-pool' + +``` + + +Set a work pool as the default for new deployments by adding the `--set-as-default` flag. + +Which would result in output similar to the following: + +``` +Set 'test-pool' as default work pool for profile 'default' + +To change your default work pool, run: + + prefect config set PREFECT_DEFAULT_WORK_POOL_NAME= + +``` + + +To update a work pool via the Prefect CLI, use the `prefect work-pool update` command: + +``` +prefect work-pool update [OPTIONS] NAME + +``` + + +`NAME` is the name of the work pool to update. + +Optional configuration parameters you can specify to update the work pool include: + + + +| Option | Description | +|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--base-job-template` | The path to a JSON file containing the base job template to use. If unspecified, Prefect will use the default base job template for the given worker type. | +| `--description` | A description of the work pool. | +| `--concurrency-limit` | The maximum number of flow runs to run simultaneously in the work pool. | + + +**Managing work pools in CI/CD** + +You can version control your base job template by committing it as a JSON file to your repository and control updates to your work pools' base job templates by using the `prefect work-pool update` command in your CI/CD pipeline. For example, you could use the following command to update a work pool's base job template to the contents of a file named `base-job-template.json`: + +``` +prefect work-pool update --base-job-template base-job-template.json my-work-pool + +``` + + +#### Base job template + +Each work pool has a base job template that allows the customization of the behavior of the worker executing flow runs from the work pool. + +The base job template acts as a contract defining the configuration passed to the worker for each flow run and the options available to deployment creators to customize worker behavior per deployment. + +A base job template comprises a `job_configuration` section and a `variables` section. + +The `variables` section defines the fields available to be customized per deployment. The `variables` section follows the [OpenAPI specification](https://swagger.io/specification/), which allows work pool creators to place limits on provided values (type, minimum, maximum, etc.). 
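+
+For instance, a trimmed sketch of a `variables` entry that constrains a hypothetical `memory` field with a type, a default, and bounds — the field name and values here are illustrative, not part of any default template:
+
+```json
+{
+  "variables": {
+    "type": "object",
+    "properties": {
+      "memory": {
+        "title": "Memory",
+        "description": "Memory (in MiB) allocated to created infrastructure.",
+        "type": "integer",
+        "default": 1024,
+        "minimum": 512,
+        "maximum": 8192
+      }
+    }
+  }
+}
+```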
+
+The `job_configuration` section defines how values provided for fields in the `variables` section are translated into the configuration given to a worker when executing a flow run.
+
+The values in the `job_configuration` can use placeholders to reference values provided in the `variables` section. Placeholders are declared using double curly braces, e.g., `{{ variable_name }}`. `job_configuration` values can also be hard-coded if the value should not be customizable.
+
+Each worker type is configured with a default base job template, making it easy to start with a work pool. The default base template defines values that will be passed to every flow run, but they can be overridden on a per-deployment or per-flow run basis.
+
+For example, if we create a `process` work pool named 'above-ground' via the CLI:
+
+```
+prefect work-pool create --type process above-ground
+
+```
+
+
+We see these configuration options available in the Prefect UI: ![process work pool configuration options](/images/work-pools-workers2.png)
+
+For a `process` work pool with the default base job template, we can set environment variables for spawned processes, set the working directory in which to execute flows, and control whether flow run output is streamed to workers' standard output. You can also see an example of the JSON-formatted base job template on the 'Advanced' tab.
+
+You can examine the default base job template for a given worker type by running:
+
+```
+prefect work-pool get-default-base-job-template --type process
+
+```
+
+
+```json
+{
+  "job_configuration": {
+    "command": "{{ command }}",
+    "env": "{{ env }}",
+    "labels": "{{ labels }}",
+    "name": "{{ name }}",
+    "stream_output": "{{ stream_output }}",
+    "working_dir": "{{ working_dir }}"
+  },
+  "variables": {
+    "type": "object",
+    "properties": {
+      "name": {
+        "title": "Name",
+        "description": "Name given to infrastructure created by a worker.",
+        "type": "string"
+      },
+      "env": {
+        "title": "Environment Variables",
+        "description": "Environment variables to set when starting a flow run.",
+        "type": "object",
+        "additionalProperties": {
+          "type": "string"
+        }
+      },
+      "labels": {
+        "title": "Labels",
+        "description": "Labels applied to infrastructure created by a worker.",
+        "type": "object",
+        "additionalProperties": {
+          "type": "string"
+        }
+      },
+      "command": {
+        "title": "Command",
+        "description": "The command to use when starting a flow run. In most cases, this should be left blank and the command will be automatically generated by the worker.",
+        "type": "string"
+      },
+      "stream_output": {
+        "title": "Stream Output",
+        "description": "If enabled, workers will stream output from flow run processes to local standard output.",
+        "default": true,
+        "type": "boolean"
+      },
+      "working_dir": {
+        "title": "Working Directory",
+        "description": "If provided, workers will open flow run processes within the specified path as the working directory. Otherwise, a temporary directory will be created.",
+        "type": "string",
+        "format": "path"
+      }
+    }
+  }
+}
+
+```
+
+
+You can override each of these attributes on a per-deployment or per-flow run basis. When creating a deployment, you can specify these overrides in the `deployments.work_pool.job_variables` section of a `prefect.yaml` file or in the `job_variables` argument of a Python `flow.deploy` method.
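+
+As a sketch of the Python route, a per-deployment override via `flow.deploy` might look like the following — the repository URL is a placeholder, and the entrypoint matches the `demo_flow.py` example used next:
+
+```python
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/your-org/your-repo.git",  # placeholder repository
+        entrypoint="demo_project/demo_flow.py:some_work",
+    ).deploy(
+        name="demo-deployment",
+        work_pool_name="above-ground",
+        # override base job template defaults for this deployment only
+        job_variables={"stream_output": False},
+    )
+```
+
+The `prefect.yaml` route shown next achieves the same override declaratively.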
+
+For example, to turn off streaming output for a specific deployment, we could add the following to our `prefect.yaml`:
+
+```
+deployments:
+- name: demo-deployment
+  entrypoint: demo_project/demo_flow.py:some_work
+  work_pool:
+    name: above-ground
+    job_variables:
+      stream_output: false
+
+```
+
+
+See more about overriding job variables in the [Overriding Job Variables Guide](https://docs.prefect.io/guides/deployment/overriding-job-variables/).
+
+**Advanced Customization of the Base Job Template**
+
+For advanced use cases, you can create work pools with fully customizable job templates. This customization is available when creating or editing a work pool on the 'Advanced' tab within the UI or when updating a work pool via the Prefect CLI.
+
+Advanced customization is useful anytime the underlying infrastructure supports a high degree of customization. In these scenarios a work pool job template allows you to expose a minimal and easy-to-digest set of options to deployment authors. Additionally, these options are the _only_ customizable aspects for deployment infrastructure, which can be useful for restricting functionality in secure environments. For example, the `kubernetes` worker type allows users to specify a custom job template that can be used to configure the manifest that workers use to create jobs for flow execution.
+
+For more information and advanced configuration examples, see the [Kubernetes Worker](https://prefecthq.github.io/prefect-kubernetes/worker/) documentation.
+
+For more information on overriding a work pool's job variables see this [guide](https://docs.prefect.io/guides/deployment/overriding-job-variables/).
+
+
+### Viewing work pools
+
+At any time, users can see and edit configured work pools in the Prefect UI.
+
+![The UI displays a list of configured work pools](/images/work-pools-workers3.png)
+
+To view work pools with the Prefect CLI, you can:
+
+* List (`ls`) all available pools
+* Inspect (`inspect`) the details of a single pool
+* Preview (`preview`) scheduled work for a single pool
+
+`prefect work-pool ls` lists all configured work pools for the server.
+
+For example:
+
+```
+                                    Work pools
+┏━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓
+┃ Name      ┃ Type          ┃ ID                                   ┃ Concurrency Limit ┃
+┡━━━━━━━━━━━╇━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩
+│ barbeque  │ docker        │ 72c0a101-b3e2-4448-b5f8-a8c5184abd17 │ None              │
+│ k8s-pool  │ kubernetes    │ 7b6e3523-d35b-4882-84a7-7a107325bb3f │ None              │
+│ test-pool │ prefect-agent │ a51adf8c-58bb-4949-abe6-1b87af46eabd │ None              │
+│ my-pool   │ process       │ cd6ff9e8-bfd8-43be-9be3-69375f7a11cd │ None              │
+└───────────┴───────────────┴──────────────────────────────────────┴───────────────────┘
+                            (**) denotes a paused pool
+
+```
+
+
+`prefect work-pool inspect` provides all configuration metadata for a specific work pool by ID.
+
+```
+prefect work-pool inspect 'test-pool'
+
+```
+
+
+Outputs information similar to the following:
+
+```
+Workpool(
+    id='a51adf8c-58bb-4949-abe6-1b87af46eabd',
+    created='2 minutes ago',
+    updated='2 minutes ago',
+    name='test-pool',
+    filter=None,
+)
+
+```
+
+
+`prefect work-pool preview` displays scheduled flow runs for a specific work pool by ID for the upcoming hour. The optional `--hours` flag lets you specify the number of hours to look ahead.
+ +``` +prefect work-pool preview 'test-pool' --hours 12 + +``` + + +``` +┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ Scheduled Star… ┃ Run ID ┃ Name ┃ Deployment ID ┃ +┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2022-02-26 06:… │ 741483d4-dc90-4913-b88d-0… │ messy-petrel │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 05:… │ 14e23a19-a51b-4833-9322-5… │ unselfish-g… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 04:… │ deb44d4d-5fa2-4f70-a370-e… │ solid-ostri… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 03:… │ 07374b5c-121f-4c8d-9105-b… │ sophisticat… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 02:… │ 545bc975-b694-4ece-9def-8… │ gorgeous-mo… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 01:… │ 704f2d67-9dfa-4fb8-9784-4… │ sassy-hedge… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-26 00:… │ 691312f0-d142-4218-b617-a… │ sincere-moo… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-25 23:… │ 7cb3ff96-606b-4d8c-8a33-4… │ curious-cat… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-25 22:… │ 3ea559fe-cb34-43b0-8090-1… │ primitive-f… │ 156edead-fe6a-4783-a618-21… │ +│ 2022-02-25 21:… │ 96212e80-426d-4bf4-9c49-e… │ phenomenal-… │ 156edead-fe6a-4783-a618-21… │ +└─────────────────┴────────────────────────────┴──────────────┴─────────────────────────────┘ + (**) denotes a late run + +``` + + +### Work pool status + +Work pools have three statuses: `READY`, `NOT_READY`, and `PAUSED`. A work pool is considered ready if it has at least one online worker sending heartbeats to the work pool. If a work pool has no online workers, it is considered not ready to execute work. A work pool can be placed in a paused status manually by a user or via an automation. When a paused work pool is unpaused, it will be reassigned the appropriate status based on whether any workers are sending heartbeats. + +### Pausing and deleting work pools + +A work pool can be paused at any time to stop the delivery of work to workers. Workers will not receive any work when polling a paused pool. + +To pause a work pool through the Prefect CLI, use the `prefect work-pool pause` command: + +``` +prefect work-pool pause 'test-pool' + +``` + + +To resume a work pool through the Prefect CLI, use the `prefect work-pool resume` command with the work pool name. + +To delete a work pool through the Prefect CLI, use the `prefect work-pool delete` command with the work pool name. + +### Managing concurrency + +Each work pool can optionally restrict concurrent runs of matching flows. + +For example, a work pool with a concurrency limit of 5 will only release new work if fewer than 5 matching runs are currently in a `Running` or `Pending` state. If 3 runs are `Running` or `Pending`, polling the pool for work will only result in 2 new runs, even if there are many more available, to ensure that the concurrency limit is not exceeded. + +When using the `prefect work-pool` Prefect CLI command to configure a work pool, the following subcommands set concurrency limits: + +* `set-concurrency-limit` sets a concurrency limit on a work pool. +* `clear-concurrency-limit` clears any concurrency limits from a work pool. + +### Work queues + + +**Advanced topic** + +Prefect will automatically create a default work queue if needed. + + +Work queues offer advanced control over how runs are executed. Each work pool has a "default" queue that all work will be sent to by default. 
Additional queues can be added to a work pool to enable greater control over work delivery through fine-grained priority and concurrency. Each work queue has a priority indicated by a unique positive integer. Lower numbers take greater priority in the allocation of work. Accordingly, new queues can be added without changing the rank of the higher-priority queues (e.g., no matter how many queues you add, the queue with priority `1` will always be the highest priority).
+
+Work queues can also have their own concurrency limits. Note that each queue is also subject to the global work pool concurrency limit, which cannot be exceeded.
+
+Together, work queue priority and concurrency limits enable precise control over work. For example, a pool may have three queues: a "low" queue with priority `10` and no concurrency limit, a "high" queue with priority `5` and a concurrency limit of `3`, and a "critical" queue with priority `1` and a concurrency limit of `1`. This arrangement enables a pattern with two levels of priority, "high" and "low", for regularly scheduled flow runs, and a "critical" queue reserved for unplanned, urgent work, such as a backfill.
+
+Priority determines the order in which flow runs are submitted for execution. If all flow runs can be executed with no limitation due to concurrency or otherwise, priority is still used to determine the order of submission, but it has no impact on execution. If not all flow runs can be executed, usually as a result of concurrency limits, priority determines which queues receive precedence to submit runs for execution.
+
+Flow run submission proceeds from the highest-priority queue to the lowest. In the preceding example, all work from the "critical" queue (priority 1) is submitted before any work from the "high" queue (priority 5). Once all work has been submitted from the "critical" queue, work from the "high" queue begins submission.
+
+If new flow runs are received on the "critical" queue while flow runs are still scheduled on the "high" and "low" queues, submission returns to the "critical" queue, ensuring that all scheduled work is first satisfied from the highest-priority queue until it is empty, in waterfall fashion.
+
+**Work queue status**
+
+A work queue has a `READY` status when it has been polled by a worker in the last 60 seconds. Pausing a work queue gives it a `PAUSED` status, meaning it will accept no new work until it is unpaused. A user can control the work queue's paused status in the UI. Unpausing a work queue gives it a `NOT_READY` status unless a worker has polled it in the last 60 seconds.
+
+
+### Local debugging
+
+As long as your deployment's infrastructure block supports it, you can use work pools to temporarily send runs to a worker running on your local machine for debugging by running `prefect worker start -p my-local-machine` and updating the deployment's work pool to `my-local-machine`.
+
+Worker overview
+-----------------------------------------------------
+
+Workers are lightweight polling services that retrieve scheduled runs from a work pool and execute them.
+
+Workers are similar to agents, but offer greater control over infrastructure configuration and the ability to route work to specific types of execution environments.
+
+Each worker has a type corresponding to the execution environment to which it will submit flow runs. Workers are only able to poll work pools that match their type. As a result, when deployments are assigned to a work pool, you know in which execution environment scheduled flow runs for that deployment will run.
+
+### Worker types
+
+Below is a list of available worker types. Note that most worker types require the installation of an additional package.
+
+
+|Worker Type             |Description                                      |Required Package  |
+|------------------------|-------------------------------------------------|------------------|
+|process                 |Executes flow runs in subprocesses               |                  |
+|kubernetes              |Executes flow runs as Kubernetes jobs            |prefect-kubernetes|
+|docker                  |Executes flow runs within Docker containers      |prefect-docker    |
+|ecs                     |Executes flow runs as ECS tasks                  |prefect-aws       |
+|cloud-run               |Executes flow runs as Google Cloud Run jobs      |prefect-gcp       |
+|vertex-ai               |Executes flow runs as Google Cloud Vertex AI jobs|prefect-gcp       |
+|azure-container-instance|Executes flow runs in ACI containers             |prefect-azure     |
+
+
+If you don't see a worker type that meets your needs, consider [developing a new worker type](https://docs.prefect.io/guides/deployment/developing-a-new-worker-type/)!
+
+### Worker options
+
+Workers poll for work from one or more queues within a work pool. If the worker references a work queue that doesn't exist, it will be created automatically. The worker CLI can infer the worker type from the work pool. Alternatively, you can specify the worker type explicitly. If you supply the worker type to the worker CLI, a work pool will be created automatically if it doesn't exist (using default job settings).
+
+Configuration parameters you can specify when starting a worker include:
+
+| Option | Description |
+|----------------------|-------------------------------------------------------------------------------------------------------------------------------------------|
+| `--name`, `-n` | The name to give to the started worker. If not provided, a unique name will be generated. |
+| `--pool`, `-p` | The work pool the started worker should poll. |
+| `--work-queue`, `-q` | One or more work queue names for the worker to pull from. If not provided, the worker will pull from all work queues in the work pool. |
+| `--type`, `-t` | The type of worker to start. If not provided, the worker type will be inferred from the work pool. |
+| `--prefetch-seconds` | The amount of time before a flow run's scheduled start time to begin submission. Default is the value of `PREFECT_WORKER_PREFETCH_SECONDS`. |
+| `--run-once` | Only run worker polling once. By default, the worker runs forever. |
+| `--limit`, `-l` | The maximum number of flow runs to start simultaneously. |
+| `--with-healthcheck` | Start a healthcheck server for the worker. |
+| `--install-policy` | Install policy to use for workers from Prefect integration packages. |
+
+
+You must start a worker within an environment that can access or create the infrastructure needed to execute flow runs. The worker will deploy flow runs to the infrastructure corresponding to the worker type. For example, if you start a worker with type `kubernetes`, the worker will deploy flow runs to a Kubernetes cluster.
+
+**Prefect must be installed in execution environments**
+
+
+Prefect must be installed in any environment (virtual environment, Docker container, etc.) where you intend to run the worker or execute a flow run.
+
+
+
+**`PREFECT_API_URL` and `PREFECT_API_KEY` settings for workers**
+
+
+`PREFECT_API_URL` must be set for the environment in which your worker is running. You must also have a user or service account with the `Worker` role, which can be configured by setting the `PREFECT_API_KEY`.
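+
+For example, a sketch of pointing a worker's environment at Prefect Cloud via the CLI — the account and workspace IDs below are placeholders:
+
+```
+prefect config set PREFECT_API_URL="https://api.prefect.cloud/api/accounts/<ACCOUNT_ID>/workspaces/<WORKSPACE_ID>"
+prefect config set PREFECT_API_KEY="<YOUR_API_KEY>"
+```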
+
+### Worker status
+
+Workers have two statuses: `ONLINE` and `OFFLINE`. A worker is online if it sends regular heartbeat messages to the Prefect API. If a worker misses three heartbeats, it is considered offline. By default, a worker is considered offline a maximum of 90 seconds after it stops sending heartbeats, but the threshold can be configured via the `PREFECT_WORKER_HEARTBEAT_SECONDS` setting.
+
+### Starting a worker
+
+Use the `prefect worker start` CLI command to start a worker. You must pass at least the work pool name. If the work pool does not exist, it will be created if the `--type` flag is used.
+
+```
+prefect worker start -p [work pool name]
+
+```
+
+
+For example:
+
+```
+prefect worker start -p "my-pool"
+
+```
+
+
+This results in output like the following:
+
+```
+Discovered worker type 'process' for work pool 'my-pool'.
+Worker 'ProcessWorker 65716280-96f8-420b-9300-7e94417f2673' started!
+
+```
+
+
+In this case, Prefect automatically discovered the worker type from the work pool. To create a work pool and start a worker in one command, use the `--type` flag:
+
+```
+prefect worker start -p "my-pool" --type "process"
+
+```
+
+
+```
+Worker 'ProcessWorker d24f3768-62a9-4141-9480-a056b9539a25' started!
+06:57:53.289 | INFO | prefect.worker.process.processworker d24f3768-62a9-4141-9480-a056b9539a25 - Worker pool 'my-pool' created.
+
+```
+
+
+In addition, workers can limit the number of flow runs they will start simultaneously with the `--limit` flag. For example, to limit a worker to five concurrent flow runs:
+
+```
+prefect worker start --pool "my-pool" --limit 5
+
+```
+
+
+### Configuring prefetch
+
+By default, the worker begins submitting flow runs a short time (10 seconds) before they are scheduled to run. This behavior allows time for the infrastructure to be created so that the flow run can start on time.
+
+In some cases, infrastructure will take longer than 10 seconds to start the flow run. The prefetch can be increased using the `--prefetch-seconds` option or the `PREFECT_WORKER_PREFETCH_SECONDS` setting.
+
+If this value is _more_ than the amount of time it takes for the infrastructure to start, the flow run will _wait_ until its scheduled start time.
+
+### Polling for work
+
+Workers poll for work every 15 seconds by default. This interval is configurable in your [profile settings](https://docs.prefect.io/concepts/settings/) with the `PREFECT_WORKER_QUERY_SECONDS` setting.
+
+### Install policy
+
+The Prefect CLI can install the required package for Prefect-maintained worker types automatically. You can configure this behavior with the `--install-policy` option. The following are valid install policies:
+
+| Install Policy | Description |
+|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `always` | Always install the required package. Will update the required package to the most recent version if already installed. |
+| `if-not-present` | Install the required package if it is not already installed. |
+| `never` | Never install the required package. |
+| `prompt` | Prompt the user to choose whether to install the required package. This is the default install policy. If `prefect worker start` is run non-interactively, the `prompt` install policy behaves the same as `never`. |
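+
+For instance, a sketch of starting a Docker worker that installs the `prefect-docker` package only if it's missing — the pool name here is hypothetical:
+
+```
+prefect worker start -p "my-docker-pool" --type docker --install-policy if-not-present
+```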
+
+### Additional resources
+
+See how to daemonize a Prefect worker in [this guide](https://docs.prefect.io/guides/deployment/daemonize/).
+
+For more information on overriding a work pool's job variables, see this [guide](https://docs.prefect.io/guides/deployment/overriding-job-variables/).
\ No newline at end of file
diff --git a/docs/2.19.x/getting-started/installation.mdx b/docs/2.19.x/getting-started/installation.mdx
new file mode 100644
index 000000000000..6e8c67b6e1e8
--- /dev/null
+++ b/docs/2.19.x/getting-started/installation.mdx
@@ -0,0 +1,172 @@
+---
+title: Installation
+description: Prefect requires Python 3.8 or newer.
+---
+
+
+We recommend installing Prefect using a Python virtual environment manager such as `pipenv`, `conda`, or `virtualenv`/`venv`.
+
+You can use [Prefect Cloud](https://docs.prefect.io/ui/cloud/) as your API server or [host your own Prefect server instance](https://docs.prefect.io/host/) backed by [PostgreSQL](https://docs.prefect.io/concepts/database/#configuring_a_postgresql_database). For development, you can use [SQLite](https://docs.prefect.io/concepts/database/#configuring_a_sqlite_database) 3.24 or newer as your database.
+
+[Prefect Cloud](https://docs.prefect.io/cloud/) is a managed solution that provides strong scaling, performance, and security. Learn more about Prefect Cloud solutions for enterprises [here](https://www.prefect.io/pricing).
+
+
+**Windows and Linux requirements**
+
+See [Windows installation notes](#windows-installation-notes) and [Linux installation notes](#linux-installation-notes) for details on additional installation requirements and considerations.
+
+## Install Prefect
+
+The following sections describe how to install Prefect in your environment.
+
+### Installing the latest version
+
+Prefect is published as a Python package. To install the latest release (or upgrade an existing Prefect install, along with its Python dependencies), run the following command in your terminal:
+
+```
+pip install -U prefect
+```
+
+To install a specific Prefect version, specify the version number like this:
+
+```
+pip install -U "prefect==2.17.1"
+```
+
+
+See available release versions in the [Prefect Release Notes](https://github.com/PrefectHQ/prefect/blob/main/RELEASE-NOTES.md).
+
+See our [Contributing guide](https://docs.prefect.io/contributing/overview/) for instructions on installing Prefect for development, and see the [section below](#installing_unreleased_code) to install directly from the `main` branch.
+
+### Checking your installation
+
+To confirm that Prefect was installed correctly, run the command `prefect version` in your terminal.
+
+You should see output similar to the following:
+
+```
+Version: 2.17.1
+API version: 0.8.4
+Python version: 3.12.2
+Git commit: d6bdb075
+Built: Thu, Apr 11, 2024 6:58 PM
+OS/Arch: darwin/arm64
+Profile: local
+Server type: ephemeral
+Server:
+  Database: sqlite
+  SQLite version: 3.45.2
+```
+
+
+## Windows installation notes
+
+You can install and run Prefect via Windows PowerShell, the Windows Command Prompt, or [`conda`](https://docs.conda.io/projects/conda/en/latest/user-guide/install/windows.html). After installation, you may need to manually add the Python local packages `Scripts` folder to your `Path` environment variable.
+
+The `Scripts` folder path looks something like this (the username and Python version may be different on your system):
+
+```
+C:\Users\MyUserNameHere\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\Scripts
+```
+
+
+Watch the `pip install` output messages for the `Scripts` folder path on your system.
+
+If you're using Windows Subsystem for Linux (WSL), see [Linux installation notes](#linux-installation-notes).
+
+## Linux installation notes
+
+Linux is a popular operating system for running Prefect. If you are hosting your own Prefect server instance with a SQLite database, note that the SQLite version bundled with certain Linux distributions can be problematic. Compatible versions ship with Ubuntu 22.04 LTS and Ubuntu 20.04 LTS.
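+
+If you're unsure which SQLite version your Python environment bundles, you can check it with the standard library:
+
+```python
+import sqlite3
+
+# version of the SQLite library bundled with this Python interpreter
+print(sqlite3.sqlite_version)
+```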
+
+Alternatively, you can [install SQLite on Red Hat Enterprise Linux (RHEL)](#install-sqlite-on-rhel) or use the `conda` virtual environment manager and configure a compatible SQLite version.
+
+## Using a self-signed SSL certificate
+
+If you're using a self-signed SSL certificate, you need to configure your environment to trust the certificate. Add the certificate to your system bundle and point your tools to use that bundle by configuring the `SSL_CERT_FILE` environment variable.
+
+If the certificate is not part of your system bundle, set the `PREFECT_API_TLS_INSECURE_SKIP_VERIFY` setting to `True` to disable certificate verification altogether.
+
+**_Note:_** Disabling certificate validation is insecure and only suggested as an option for testing!
+
+## Proxies
+
+Prefect supports communicating via proxies through environment variables. Whether you are using Prefect Cloud or hosting your own Prefect server instance, set `HTTPS_PROXY` and `SSL_CERT_FILE` in your environment, and the underlying network libraries will route Prefect's requests appropriately.
+
+Alternatively, the Prefect library will connect to the API via any proxies you have listed in the `HTTP_PROXY` or `ALL_PROXY` environment variables.
+You may also use the `NO_PROXY` environment variable to specify which hosts should not be sent through the proxy.
+
+For more information about these environment variables, see the [cURL documentation](https://everything.curl.dev/usingcurl/proxies/env).
+
+## `prefect-client` library
+
+The `prefect-client` library is a minimal installation of Prefect designed for interacting with Prefect Cloud or a remote self-hosted server instance.
+
+`prefect-client` enables a subset of Prefect's functionality with a smaller installation size, making it ideal for use in lightweight, resource-constrained, or ephemeral environments. It omits all CLI and server components found in the `prefect` library.
+
+Install the latest version with:
+
+```
+pip install -U prefect-client
+```
+
+
+## SQLite
+
+By default, a local Prefect server instance uses SQLite as the backing database. SQLite is not packaged with the Prefect installation. Most systems will already have SQLite installed, because it is typically bundled with Python.
+
+If you install the [`prefect-client`](https://pypi.org/project/prefect-client/) library, which provides a limited subset of the full Prefect library's functionality, you do not need SQLite installed.
+
+### Install SQLite on RHEL
+
+To install an appropriate version of SQLite on Red Hat Enterprise Linux (RHEL), follow the instructions below. Note that some RHEL instances have no C compiler, so you may need to check for and install `gcc` first.
+
+Download and extract the tarball for SQLite:
+
+```
+wget https://www.sqlite.org/2022/sqlite-autoconf-3390200.tar.gz
+tar -xzf sqlite-autoconf-3390200.tar.gz
+```
+
+
+Move to the extracted SQLite directory, then build and install SQLite:
+
+```
+cd sqlite-autoconf-3390200/
+./configure
+make
+make install
+```
+
+
+Add `LD_LIBRARY_PATH` to your profile:
+
+```
+echo 'export LD_LIBRARY_PATH="/usr/local/lib"' >> /etc/profile
+```
+
+
+Restart your shell to register these changes. Now you can install Prefect using `pip`.
+
+## Installing unreleased code
+
+To use the most up-to-date, unreleased Prefect code, you can install directly off the `main` GitHub branch:
+
+```
+pip install -U git+https://github.com/PrefectHQ/prefect
+```
+
+
+**The `main` branch may not be stable**
+
+Please be aware that this method installs unreleased code and may not be stable.
+
+## Next steps
+
+Now that you have Prefect installed and your environment configured, check out the [Tutorial](https://docs.prefect.io/tutorial/) to get more familiar with Prefect.
\ No newline at end of file
diff --git a/docs/2.19.x/getting-started/overview.mdx b/docs/2.19.x/getting-started/overview.mdx
new file mode 100644
index 000000000000..4e7257974a68
--- /dev/null
+++ b/docs/2.19.x/getting-started/overview.mdx
@@ -0,0 +1,47 @@
+---
+title: Welcome to Prefect
+mode: wide
+description: Prefect is a workflow orchestration tool empowering developers to build, observe, and react to data pipelines.
+---
+
+It's the easiest way to transform any Python function into a unit of work that can be observed and orchestrated. Just bring your Python code, sprinkle in a few decorators, and go!
+
+With Prefect you gain capabilities like scheduling, retries, logging, caching, and notifications, along with observability through the Prefect UI.
+
+![screenshot of Prefect Cloud dashboard](https://docs.prefect.io/latest/img/ui/cloud-dashboard.png)
+
+## New to Prefect?
+
+Get up and running quickly with the [quickstart guide](https://docs.prefect.io/getting-started/quickstart/).
+
+Want more hands-on practice to productionize your workflows? Follow our [tutorial](https://docs.prefect.io/tutorial/).
+
+For deeper dives into common use cases, explore our [guides](https://docs.prefect.io/guides/index/).
+
+Take your understanding even further with Prefect's [concepts](https://docs.prefect.io/concepts/index/) and [API reference](https://docs.prefect.io/api-ref/).
+
+Join Prefect's [vibrant community](https://docs.prefect.io/community/) of over 26,000 engineers to learn with others and share your knowledge!
+
+
+**Need help?**
+
+Get your questions answered by a Prefect Product Advocate! [Book a Meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs)
+
diff --git a/docs/2.19.x/getting-started/quickstart.mdx b/docs/2.19.x/getting-started/quickstart.mdx
new file mode 100644
index 000000000000..92deef7b9dc3
--- /dev/null
+++ b/docs/2.19.x/getting-started/quickstart.mdx
@@ -0,0 +1,254 @@
+---
+title: Quickstart
+description: Prefect is an orchestration and observability platform that empowers developers to build and scale code quickly, turning their Python scripts into resilient, recurring workflows.
+---
+
+In this quickstart, you'll see how you can schedule your code on remote infrastructure and observe the state of your workflows. With Prefect, you can go from a Python script to a production-ready workflow that runs remotely in a few minutes.
+
+Let's get started!
+
+Setup
+---------------------------------
+
+Here's a basic script that fetches statistics about the [main Prefect GitHub repository](https://github.com/PrefectHQ/prefect).
+
+```python
+import httpx
+
+def get_repo_info():
+    url = "https://api.github.com/repos/PrefectHQ/prefect"
+    response = httpx.get(url)
+    repo = response.json()
+    print("PrefectHQ/prefect repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+
+if __name__ == "__main__":
+    get_repo_info()
+
+```
+
+
+Let's make this script schedulable, observable, resilient, and capable of running anywhere.
+ +Step 1: Install Prefect +-------------------------------------------------------------------- + +See the [install guide](https://docs.prefect.io/getting-started/installation/) for more detailed installation instructions, if needed. + +Step 2: Connect to Prefect's API +------------------------------------------------------------------------------------- + +Much of Prefect's functionality is backed by an API. The easiest way to get started is to use the API hosted by Prefect: + +1. Create a forever-free Prefect Cloud account or sign in at [https://app.prefect.cloud/](https://app.prefect.cloud/) +2. Use the `prefect cloud login` CLI command to log in to Prefect Cloud from your development environment + +``` +prefect cloud login +``` + +Choose **Log in with a web browser** and click the **Authorize** button in the browser window that opens. Your CLI is now authenticated with your Prefect Cloud account through a locally-stored API key that expires in 30 days. + +If you have any issues with browser-based authentication, see the [Prefect Cloud docs](https://docs.prefect.io/cloud/users/api-keys/) to learn how to authenticate with a manually created API key. + + +**Self-hosted Prefect server instance** + +If you would like to host a Prefect server instance on your own infrastructure, see the [tutorial](https://docs.prefect.io/tutorial/) and select the "Self-hosted" tab. Note that you will need to both host your own server and run your flows on your own infrastructure. + + +Step 3: Turn your function into a Prefect flow +------------------------------------------------------------------------------------------------------------------ + +The fastest way to get started with Prefect is to add a `@flow` decorator to your Python function. [Flows](https://docs.prefect.io/concepts/flows/) are the core observable, deployable units in Prefect and are the primary entrypoint to orchestrated work. + + + +```python my_gh_workflow.py +import httpx # an HTTP client library and dependency of Prefect +from prefect import flow, task + + +@task(retries=2) +def get_repo_info(repo_owner: str, repo_name: str): + """Get info about a repo - will retry twice after failing""" + url = f"https://api.github.com/repos/{repo_owner}/{repo_name}" + api_response = httpx.get(url) + api_response.raise_for_status() + repo_info = api_response.json() + return repo_info + + +@task +def get_contributors(repo_info: dict): + """Get contributors for a repo""" + contributors_url = repo_info["contributors_url"] + response = httpx.get(contributors_url) + response.raise_for_status() + contributors = response.json() + return contributors + + +@flow(log_prints=True) +def repo_info(repo_owner: str = "PrefectHQ", repo_name: str = "prefect"): + """ + Given a GitHub repository, logs the number of stargazers + and contributors for that repo. + """ + repo_info = get_repo_info(repo_owner, repo_name) + print(f"Stars 🌠 : {repo_info['stargazers_count']}") + + contributors = get_contributors(repo_info) + print(f"Number of contributors 👷: {len(contributors)}") + + +if __name__ == "__main__": + repo_info() + +``` + + +Note that we added a `log_prints=True` argument to the `@flow` decorator so that `print` statements within the flow-decorated function will be logged. Also note that our flow calls two tasks, which are defined by the `@task` decorator. Tasks are the smallest unit of observed and orchestrated work in Prefect. 
+ +``` +python my_gh_workflow.py +``` + +Now when we run this script, Prefect will automatically track the state of the flow run and log the output where we can see it in the UI and CLI. + +```python +14:28:31.099 | INFO | prefect.engine - Created flow run 'energetic-panther' for flow 'repo-info' +14:28:31.100 | INFO | Flow run 'energetic-panther' - View at https://app.prefect.cloud/account/123/workspace/abc/flow-runs/flow-run/xyz +14:28:32.178 | INFO | Flow run 'energetic-panther' - Created task run 'get_repo_info-0' for task 'get_repo_info' +14:28:32.179 | INFO | Flow run 'energetic-panther' - Executing 'get_repo_info-0' immediately... +14:28:32.584 | INFO | Task run 'get_repo_info-0' - Finished in state Completed() +14:28:32.599 | INFO | Flow run 'energetic-panther' - Stars 🌠 : 13609 +14:28:32.682 | INFO | Flow run 'energetic-panther' - Created task run 'get_contributors-0' for task 'get_contributors' +14:28:32.682 | INFO | Flow run 'energetic-panther' - Executing 'get_contributors-0' immediately... +14:28:33.118 | INFO | Task run 'get_contributors-0' - Finished in state Completed() +14:28:33.134 | INFO | Flow run 'energetic-panther' - Number of contributors 👷: 30 +14:28:33.255 | INFO | Flow run 'energetic-panther' - Finished in state Completed('All states completed.') + +``` + + +You should see similar output in your terminal, with your own randomly generated flow run name and your own Prefect Cloud account URL. + +Step 4: Choose a remote infrastructure location +-------------------------------------------------------------------------------------------------------------------- + +Let's get this workflow running on infrastructure other than your local machine! We can tell Prefect where we want to run our workflow by creating a [work pool](https://docs.prefect.io/concepts/work-pools/). + +We can have Prefect Cloud run our flow code for us with a Prefect Managed work pool. + +Let's create a [Prefect Managed work pool](https://docs.prefect.io/guides/managed-execution/) so that Prefect can run our flows for us. We can create a work pool in the UI or from the CLI. Let's use the CLI: + +``` +prefect work-pool create my-managed-pool --type prefect:managed + +``` + + +You should see a message in the CLI that your work pool was created. Feel free to check out your new work pool on the **Work Pools** page in the UI. + +Step 5: Make your code schedulable +------------------------------------------------------------------------------------------ + +We have a flow function and we have a work pool where we can run our flow remotely. Let's package both of these things, along with the location for where to find our flow code, into a [deployment](https://docs.prefect.io/concepts/deployments/) so that we can schedule our workflow to run remotely. + +Deployments elevate flows to remotely configurable entities that have their own API. + +Let's make a script to build a deployment with the name _my-first-deployment_ and set it to run on a schedule. + + +```python create_deployment.py +from prefect import flow + +if __name__ == "__main__": + flow.from_source( + source="https://github.com/prefecthq/demos.git", + entrypoint="my_gh_workflow.py:repo_info", + ).deploy( + name="my-first-deployment", + work_pool_name="my-managed-pool", + cron="0 1 * * *", + ) + +``` + + +Run the script to create the deployment on the Prefect Cloud server. Note that the `cron` argument will schedule the deployment to run at 1am every day. 
+ +``` +python create_deployment.py + +``` + + +You should see a message that your deployment was created, similar to the one below. + +``` +Successfully created/updated all deployments! +______________________________________________________ +| Deployments | +______________________________________________________ +| Name | Status | Details | +______________________________________________________ +| repo-info/my-first-deployment | applied | | +______________________________________________________ + +To schedule a run for this deployment, use the following command: + + $ prefect deployment run 'repo-info/my-first-deployment' + +You can also run your flow via the Prefect UI: + +``` + + +Head to the **Deployments** page of the UI to check it out. + + +**Code storage options** + +You can store your flow code in nearly any location. You just need to tell Prefect where to find it. In this example, we use a GitHub repository, but you could bake your code into a Docker image or store it in cloud provider storage. Read more in [this guide](https://docs.prefect.io/guides/prefect-deploy/#creating-work-pool-based-deployments). + + + +**Push your code to GitHub** + +In the example above, we use an existing GitHub repository. If you make changes to the flow code, you will need to push those changes to your own GitHub account and update the `source` argument to point to your repository. + + +You can trigger a manual run of this deployment by either clicking the **Run** button in the top right of the deployment page in the UI, or by running the following CLI command in your terminal: + +``` +prefect deployment run 'repo-info/my-first-deployment' + +``` + + +The deployment is configured to run on a Prefect Managed work pool, so Prefect will automatically spin up the infrastructure to run this flow. It may take a minute to set up the Docker image in which the flow will run. + +After a minute or so, you should see the flow run graph and logs on the Flow Run page in the UI. + +![Managed flow run graph and logs](https://docs.prefect.io/img/ui/qs-flow-run.png) + + +**Remove the schedule** + +Click the **Remove** button in the top right of the **Deployment** page so that the workflow is no longer scheduled to run once per day. + + +Next steps +------------------------------------------- + +You've seen how to move from a Python script to a scheduled, observable, remotely orchestrated workflow with Prefect. + +To learn how to run flows on your own infrastructure, customize the Docker image where your flow runs, and gain more orchestration and observation benefits check out the [tutorial](https://docs.prefect.io/tutorial/). + +**Need help?** + +Get your questions answered by a Prefect Product Advocate! [Book a meeting](https://calendly.com/prefect-experts/prefect-product-advocates?utm_campaign=prefect_docs_cloud&utm_content=prefect_docs&utm_medium=docs&utm_source=docs) + +Happy building! \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/development/automations.mdx b/docs/2.19.x/how-to-guides/development/automations.mdx new file mode 100644 index 000000000000..dae897ba3729 --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/automations.mdx @@ -0,0 +1,485 @@ +--- +title: Using Automations for Dynamic Responses +sidebarTitle: Automations +--- + +From the [Automations concept page](https://docs.prefect.io/concepts/automations/), we saw what an automation can do and how to configure one within the UI. 
+
+In this guide, we will showcase the following common use cases:
+
+* Create a simple notification automation in just a few UI clicks
+* Build upon an event-based automation
+* Combine into a multi-layered responsive deployment pattern
+
+
+**Available only on Prefect Cloud**
+
+Automations are a Prefect Cloud feature.
+
+
+Prerequisites
+-------------------------------------------------
+
+Please have the following before exploring the guide:
+
+* Python installed
+* Prefect installed (follow the [installation guide](https://docs.prefect.io/getting-started/installation/))
+* Authenticated to a [Prefect Cloud workspace](https://docs.prefect.io/getting-started/quickstart/#step-2-connect-to-prefects-api/)
+* A [work pool](https://docs.prefect.io/concepts/work-pools/) set up to handle the deployments
+
+Creating the example script
+-----------------------------------------------------------------------------
+
+Automations allow you to take actions in response to triggering events recorded by Prefect.
+
+For example, let's try to grab data from an API and send a notification based on the end state.
+
+We can start by pulling hypothetical user data from an endpoint and then performing data cleaning and transformations.
+
+Let's create a simple extract task that pulls the data from a random user data generator endpoint.
+
+```python
+from prefect import flow, task, get_run_logger
+import requests
+import json
+
+@task
+def fetch(url: str):
+    logger = get_run_logger()
+    response = requests.get(url)
+    raw_data = response.json()
+    logger.info(f"Raw response: {raw_data}")
+    return raw_data
+
+@task
+def clean(raw_data: dict):
+    results = raw_data.get('results')[0]
+    logger = get_run_logger()
+    logger.info(f"Cleaned results: {results}")
+    return results['name']
+
+@flow
+def build_names(num: int = 10):
+    df = []
+    url = "https://randomuser.me/api/"
+    logger = get_run_logger()
+    copy = num
+    while num != 0:
+        raw_data = fetch(url)
+        df.append(clean(raw_data))
+        num -= 1
+    logger.info(f"Built {copy} names: {df}")
+    return df
+
+if __name__ == "__main__":
+    list_of_names = build_names()
+
+```
+
+
+The data cleaning workflow has visibility into each step, and we send a list of names to the next step of our pipeline.
+
+Create notification block within the UI
+-----------------------------------------------------------------------------------------------------
+
+Now let's send a notification based on a completed state outcome. We can configure a notification to be sent so that we know when to look into our workflow logic.
+
+1. Prior to creating the automation, let's confirm the notification location. We have to create a notification block to help define where the notification will be sent.
+
+![](/images/automations1.png)
+
+2. Let's navigate to the blocks page on the UI, and click into creating an email notification block.
+
+![](/images/automations2.png)
+
+3. Now that we created a notification block, we can go to the automations page to create our first automation.
+
+![](/images/automations3.png)
+
+4. Next, find the trigger type; in this case, let's use a flow completion.
+
+![](/images/automations4.png)
+
+5. Finally, let's create the actions that will be taken once the trigger is hit. In this case, let's create a notification to showcase the completion.
+
+![](/images/automations5.png)
+
+6. Now the automation is ready to be triggered from a flow run completion. Let's run the file locally and see that the notification is sent to our inbox after the completion. It may take a few minutes for the notification to arrive.
+
+![](/images/automations6.png)
+
+
+**No deployment created**
+
+Keep in mind that we did not need to create a deployment to trigger our automation: the state outcome of a local flow run triggered this notification block. We are not required to create a deployment to trigger a notification.
+
+
+Now that you've seen how to create an email notification from a flow run completion, let's see how we can kick off a deployment run in response to an event.
+
+Event-based deployment automation
+-----------------------------------------------------------------------------------------
+
+We can create an automation that kicks off a deployment instead of a notification. Let's explore how to create this automation programmatically using Prefect's REST API.
+
+See the [REST API documentation](https://docs.prefect.io/latest/api-ref/rest-api/#interacting-with-the-rest-api) as a reference for interacting with the Prefect Cloud automation endpoints.
+
+Let's create a deployment where we can kick off some work based on how long a flow is running. For example, if the `build_names` flow is taking too long to execute, we can kick off a deployment of the same `build_names` flow, but replace the `num` value with a lower number to speed up completion. You can create a deployment with a `prefect.yaml` file or a Python file that uses `flow.deploy`.
+
+Create a `prefect.yaml` file like this one for our flow `build_names`:
+
+```yaml
+# Welcome to your prefect.yaml file! You can use this file for storing and managing
+# configuration for deploying your flows. We recommend committing this file to source
+# control along with your flow code.
+
+# Generic metadata about this project
+name: automations-guide
+prefect-version: 2.13.1
+
+# build section allows you to manage and build docker images
+build: null
+
+# push section allows you to manage if and how this project is uploaded to remote locations
+push: null
+
+# pull section allows you to provide instructions for cloning this project in remote locations
+pull:
+- prefect.deployments.steps.set_working_directory:
+    directory: /Users/src/prefect/Playground/automations-guide
+
+# the deployments section allows you to provide configuration for deploying flows
+deployments:
+- name: deploy-build-names
+  version: null
+  tags: []
+  description: null
+  entrypoint: test-automations.py:build_names
+  parameters: {}
+  work_pool:
+    name: tutorial-process-pool
+    work_queue_name: null
+    job_variables: {}
+  schedule: null
+
+```
+
+
+To follow a more Python-based approach to create a deployment, you can use `flow.deploy` as in the example below:
+
+```python
+# .deploy only needs a name, a valid work pool,
+# and a reference to where the flow code exists
+
+if __name__ == "__main__":
+    build_names.deploy(
+        name="deploy-build-names",
+        work_pool_name="tutorial-process-pool",
+        image="my_registry/my_image:my_image_tag",
+    )
+```
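+
+Before wiring up the automation, you'll need the deployment's ID. As a sketch, one way to look it up programmatically with Prefect's client (assuming the deployment created above):
+
+```python
+import asyncio
+
+from prefect.client.orchestration import get_client
+
+
+async def get_deployment_id():
+    async with get_client() as client:
+        # deployments are addressed as "flow-name/deployment-name"
+        deployment = await client.read_deployment_by_name("build-names/deploy-build-names")
+        print(deployment.id)
+
+asyncio.run(get_deployment_id())
+```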
+ +```python +# .deploy only needs a name, valid work pool +# and a reference to where the flow code exists + +if __name__ == "__main__": +build_names.deploy( + name="deploy-build-names", + work_pool_name="tutorial-process-pool" + image="my_registry/my_image:my_image_tag", +) +``` + + + + +Now let's grab our `deployment_id` from this deployment, and embed it in our automation. There are many ways to obtain the `deployment_id`, but the CLI is a quick way to see all of your deployment ids. + + + +**Find deployment\_id from the CLI** + +The quickest way to see the ID's associated with your deployment would be running `prefect deployment ls` in an authenticated command prompt, and you will be able to see the id's associated with all of your deployments + + +``` +prefect deployment ls + Deployments +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ Name ┃ ID ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ Extract islands/island-schedule │ d9d7289c-7a41-436d-8313-80a044e61532 │ +│ build-names/deploy-build-names │ 8b10a65e-89ef-4c19-9065-eec5c50242f4 │ +│ ride-duration-prediction-backfill/backfill-deployment │ 76dc6581-1773-45c5-a291-7f864d064c57 │ +└───────────────────────────────────────────────────────┴──────────────────────────────────────┘ + +``` + + +We can create an automation via a POST call, where we can programmatically create the automation. Ensure you have your `api_key`, `account_id`, and `workspace_id`. + +```python +def create_event_driven_automation(): + api_url = f"https://api.prefect.cloud/api/accounts/{account_id}/workspaces/{workspace_id}/automations/" + data = { + "name": "Event Driven Redeploy", + "description": "Programmatically created an automation to redeploy a flow based on an event", + "enabled": "true", + "trigger": { + "after": [ + "string" + ], + "expect": [ + "prefect.flow-run.Running" + ], + "for_each": [ + "prefect.resource.id" + ], + "posture": "Proactive", + "threshold": 30, + "within": 0 + }, + "actions": [ + { + "type": "run-deployment", + "source": "selected", + "deployment_id": "YOUR-DEPLOYMENT-ID", + "parameters": "10" + } + ], + "owner_resource": "string" + } + + headers = {"Authorization": f"Bearer {PREFECT_API_KEY}"} + response = requests.post(api_url, headers=headers, json=data) + + print(response.json()) + return response.json() + +``` + + +After running this function, you will see within the UI the changes that came from the post request. Keep in mind, the context will be "custom" on UI. + +Let's run the underlying flow and see the deployment get kicked off after 30 seconds elapsed. This will result in a new flow run of `build_names`, and we are able to see this new deployment get initiated with the custom parameters we outlined above. + +In a few quick changes, we are able to programmatically create an automation that deploys workflows with custom parameters. + +Using an underlying .yaml file +---------------------------------------------------------------------------------- + +We can extend this idea one step further by utilizing our own .yaml version of the automation, and registering that file with our UI. This simplifies the requirements of the automation by declaring it in its own .yaml file, and then registering that .yaml with the API. + +Let's first start with creating the .yaml file that will house the automation requirements. 
Here is how it looks:
+
+```yaml
+name: Cancel long running flows
+description: Cancel any flow run after an hour of execution
+trigger:
+  match:
+    "prefect.resource.id": "prefect.flow-run.*"
+  match_related: {}
+  after:
+    - "prefect.flow-run.Failed"
+  expect:
+    - "prefect.flow-run.*"
+  for_each:
+    - "prefect.resource.id"
+  posture: "Proactive"
+  threshold: 1
+  within: 30
+actions:
+  - type: "cancel-flow-run"
+
+```
+
+
+We can then use a helper function that registers this YAML file via the REST API:
+
+```python
+import yaml
+
+from utils import post, put  # helper wrappers around the REST API, from the repo linked below
+
+def create_or_update_automation(path: str = "automation.yaml"):
+    """Create or update an automation from a local YAML file"""
+    # Load the definition
+    with open(path, "r") as fh:
+        payload = yaml.safe_load(fh)
+
+    # Find existing automations by name
+    automations = post("/automations/filter")
+    existing_automation = [a["id"] for a in automations if a["name"] == payload["name"]]
+    automation_exists = len(existing_automation) > 0
+
+    # Create or update the automation
+    if automation_exists:
+        print(f"Automation '{payload['name']}' already exists and will be updated")
+        put(f"/automations/{existing_automation[0]}", payload=payload)
+    else:
+        print(f"Creating automation '{payload['name']}'")
+        post("/automations/", payload=payload)
+
+if __name__ == "__main__":
+    create_or_update_automation()
+
+```
+
+
+You can find a complete repository with these API examples in this [GitHub repository](https://github.com/EmilRex/prefect-api-examples/tree/main).
+
+In this example, we created the automation by registering the .yaml file with a helper function, offering yet another way to create an automation.
+
+Custom webhook kicking off an automation
+-------------------------------------------------------------------------------------------------------
+
+Webhooks expose the events API, which allows us to extend the functionality of deployments and respond to changes in our workflows in a few easy steps.
+
+By exposing a webhook endpoint, we can kick off workflows that trigger deployments - all from a simple event created from an HTTP request.
+
+Let's create a webhook within the UI. Here is the webhook template we can use to create these dynamic events:
+
+```json
+{
+    "event": "model-update",
+    "resource": {
+        "prefect.resource.id": "product.models.{{ body.model_id }}",
+        "prefect.resource.name": "{{ body.friendly_name }}",
+        "run_count": "{{ body.run_count }}"
+    }
+}
+```
+
+
+From a simple input, we can easily create an exposed webhook endpoint.
+
+![webhook-simple](/images/automations7.png)
+
+Each webhook corresponds to a custom event that you can react to downstream with a separate deployment or automation.
+
+For example, we can send a curl request to the endpoint with information such as a run count for our deployment:
+
+```
+curl -X POST https://api.prefect.cloud/hooks/34iV2SFke3mVa6y5Y-YUoA -d "model_id=adhoc" -d "run_count=10" -d "friendly_name=test-user-input"
+
+```
+
+
+From here, the webhook pulls in the parameters from the curl command and kicks off a deployment that uses them.
+
+![Webhook created](/images/automations8.png)
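+
+If you prefer Python to curl, here's a sketch of firing the same event with `requests` — the webhook URL is the example endpoint from above, so substitute the slug Prefect Cloud generated for your webhook:
+
+```python
+import requests
+
+# send the same form fields as the curl example
+response = requests.post(
+    "https://api.prefect.cloud/hooks/34iV2SFke3mVa6y5Y-YUoA",
+    data={"model_id": "adhoc", "run_count": "10", "friendly_name": "test-user-input"},
+)
+print(response.status_code)
+```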
From a few clicks in the UI, we are able to associate an external process with the Prefect events API, enabling us to trigger downstream deployments. ![Automation custom](/images/automations10.png)

In the next section, we will explore event triggers that automate the kickoff of a deployment run.

Using triggers
---------------------------------------------------

Let's take this idea one step further by creating a deployment that will be triggered when a flow run takes longer than expected. We can take advantage of Prefect's [Marvin](https://www.askmarvin.ai/) library, which uses an LLM to classify our data. Marvin is great at embedding data science and data analysis applications within your pre-existing data engineering workflows. In this case, we can use [Marvin's AI functions](https://www.askmarvin.ai/components/ai_function/#ai-function) to make our dataset more information-rich.

Install Marvin with `pip install marvin` and set your OpenAI API key as shown [here](https://www.askmarvin.ai/welcome/quickstart/).

We can add a trigger to run a deployment in response to a specific event.

Let's create an example with Marvin's AI functions. We will take in a pandas DataFrame and use the AI function to analyze it.

Here is an example of pulling in that data and classifying it using Marvin AI. We can generate dummy data based on classifications we have already created.

```python
from marvin import ai_fn
from prefect import flow
from prefect.artifacts import create_table_artifact


@ai_fn
def generate_synthetic_user_data(build_of_names: list[dict]) -> list:
    """
    Generate additional data for userID (numerical values with 6 digits), location, and timestamp as separate columns and append the data onto 'build_of_names'. Make userID the first column
    """

@flow
def create_fake_user_dataset(df):
    artifact_df = generate_synthetic_user_data(df)
    print(artifact_df)

    create_table_artifact(
        key="fake-user-data",
        table=artifact_df,
        description="Dataset that is comprised of a mix of autogenerated data based on user data"
    )

if __name__ == "__main__":
    # Example input: a small list of name records for the AI function to enrich
    create_fake_user_dataset([{"name": "Marvin"}])
```

Let's kick off a deployment with a trigger defined in a `prefect.yaml` file. We'll specify that the deployment should run when a flow run stays in a `Running` state for longer than 30 seconds.

```yaml
# Welcome to your prefect.yaml file! You can use this file for storing and managing
# configuration for deploying your flows. We recommend committing this file to source
# control along with your flow code.

# Generic metadata about this project
name: automations-guide
prefect-version: 2.13.1

# build section allows you to manage and build docker images
build: null

# push section allows you to manage if and how this project is uploaded to remote locations
push: null

# pull section allows you to provide instructions for cloning this project in remote locations
pull:
- prefect.deployments.steps.set_working_directory:
    directory: /Users/src/prefect/Playground/marvin-extension

# the deployments section allows you to provide configuration for deploying flows
deployments:
- name: create-fake-user-dataset
  triggers:
    - enabled: true
      match:
        prefect.resource.id: "prefect.flow-run.*"
      after:
        - "prefect.flow-run.Running"
      expect: []
      for_each:
        - "prefect.resource.id"
      parameters:
        param_1: 10
      posture: "Proactive"
  version: null
  tags: []
  description: null
  entrypoint: marvin-extension.py:create_fake_user_dataset
  parameters: {}
  work_pool:
    name: tutorial-process-pool
    work_queue_name: null
    job_variables: {}
  schedule: null
```

Next steps
-------------------------------------------

You've seen how to create automations via the UI, the REST API, and triggers defined in a `prefect.yaml` deployment definition.

To learn more about events that can act as automation triggers, see the [events docs](https://docs.prefect.io/concepts/events/). To learn more about event webhooks in particular, see the [webhooks guide](https://docs.prefect.io/guides/webhooks/). \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/development/ci-cd.mdx b/docs/2.19.x/how-to-guides/development/ci-cd.mdx new file mode 100644 index 000000000000..4d9622cf0698 --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/ci-cd.mdx @@ -0,0 +1,360 @@ +--- +sidebarTitle: CI/CD +title: CI/CD With Prefect +---

Many organizations deploy Prefect workflows via their CI/CD process. Each organization has its own unique CI/CD setup, but a common pattern is to use CI/CD to manage Prefect [deployments](https://docs.prefect.io/concepts/deployments). Combining Prefect's deployment features with CI/CD tools enables efficient management of flow code updates, scheduling changes, and container builds. This guide uses [GitHub Actions](https://docs.github.com/en/actions) to implement a CI/CD process, but these concepts are generally applicable across many CI/CD tools.

Note that Prefect's primary ways of creating deployments, the `.deploy` flow method and the `prefect.yaml` configuration file, are both designed with building and pushing images to a Docker registry in mind.

Getting started with GitHub Actions and Prefect
---------------------------------------------------------------------------------------------------------------------

In this example, you'll write a GitHub Actions workflow that runs each time you push to your repository's `main` branch. This workflow will build and push a Docker image containing your flow code to Docker Hub, then deploy the flow to Prefect Cloud.

### Repository secrets

Your CI/CD process must be able to authenticate with Prefect in order to deploy flows.

Deploying flows securely and non-interactively in your CI/CD process can be accomplished by saving your `PREFECT_API_URL` and `PREFECT_API_KEY` [as secrets in your repository's settings](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions) so they can be accessed in your CI/CD runner's environment without being exposed in any scripts or configuration files.

In this scenario, deploying flows involves building and pushing Docker images, so add `DOCKER_USERNAME` and `DOCKER_PASSWORD` as secrets to your repository as well.

You can create secrets for GitHub Actions in your repository under **Settings -> Secrets and variables -> Actions -> New repository secret**:

![Creating a GitHub Actions secret](/images/ci-cd.png)

### Writing a GitHub workflow

To deploy your flow via GitHub Actions, you'll need a workflow YAML file. GitHub looks for workflow YAML files in the `.github/workflows/` directory in the root of your repository. In their simplest form, GitHub workflow files are made up of triggers and jobs.

The `on:` trigger is set to run the workflow each time a push occurs on the `main` branch of the repository.

The `deploy` job consists of four `steps`:

* **`Checkout`** clones your repository into the GitHub Actions runner so you can reference files or run scripts from your repository in later steps.
* **`Log in to Docker Hub`** authenticates to Docker Hub so your image can be pushed to the Docker registry in your Docker Hub account. [docker/login-action](https://github.com/docker/login-action) is an existing GitHub action maintained by Docker. `with:` passes values into the Action, similar to passing parameters to a function.
* **`Setup Python`** installs your selected version of Python.
* **`Prefect Deploy`** installs the dependencies used in your flow, then deploys your flow. `env:` makes the `PREFECT_API_KEY` and `PREFECT_API_URL` secrets from your repository available as environment variables during this step's execution.

For reference, the examples below can be found on their respective branches of [this repository](https://github.com/prefecthq/cicd-example).

Repository structure when using `.deploy`:

```
.
├── .github/
│   └── workflows/
│       └── deploy-prefect-flow.yaml
├── flow.py
└── requirements.txt
```

Repository structure when using `prefect.yaml`:

```
.
├── .github/
│   └── workflows/
│       └── deploy-prefect-flow.yaml
├── flow.py
├── prefect.yaml
└── requirements.txt
```

`flow.py`
```python
from prefect import flow

@flow(log_prints=True)
def hello():
    print("Hello!")

if __name__ == "__main__":
    hello.deploy(
        name="my-deployment",
        work_pool_name="my-work-pool",
        image="my_registry/my_image:my_image_tag",
    )
```

`prefect.yaml`
```yaml
name: cicd-example
prefect-version: 2.14.11

build:
  - prefect_docker.deployments.steps.build_docker_image:
      id: build_image
      requires: prefect-docker>=0.3.1
      image_name: my_registry/my_image
      tag: my_image_tag
      dockerfile: auto

push:
  - prefect_docker.deployments.steps.push_docker_image:
      requires: prefect-docker>=0.3.1
      image_name: "{{ build_image.image_name }}"
      tag: "{{ build_image.tag }}"

pull: null

deployments:
  - name: my-deployment
    entrypoint: flow.py:hello
    work_pool:
      name: my-work-pool
      work_queue_name: default
      job_variables:
        image: "{{ build_image.image }}"
```

`.github/workflows/deploy-prefect-flow.yaml`
```yaml
name: Deploy Prefect flow

on:
  push:
    branches:
      - main

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Prefect Deploy
        env:
          PREFECT_API_KEY: ${{ secrets.PREFECT_API_KEY }}
          PREFECT_API_URL: ${{ secrets.PREFECT_API_URL }}
        run: |
          pip install -r requirements.txt
          python flow.py
```

### Running a GitHub workflow

After pushing commits to your repository, GitHub will automatically trigger a run of your workflow. The status of running and completed workflows can be monitored from the **Actions** tab of your repository.

![A GitHub Action triggered via push](/images/ci-cd1.png)

You can view the logs from each workflow step as they run. The `Prefect Deploy` step will include output about your image build and push, and the creation/update of your deployment.

```
Successfully built image '***/cicd-example:latest'

Successfully pushed image '***/cicd-example:latest'

Successfully created/updated all deployments!

     Deployments
┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┓
┃ Name                ┃ Status  ┃ Details ┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━┩
│ hello/my-deployment │ applied │         │
└─────────────────────┴─────────┴─────────┘
```

Advanced example
-------------------------------------------------------

In more complex scenarios, CI/CD processes often need to accommodate several additional considerations to enable a smooth development workflow:

* Making code available in different environments as it advances through stages of development
* Handling independent deployment of distinct groupings of work, as in a monorepo
* Efficiently using build time to avoid repeated work

This [example repository](https://github.com/prefecthq/cicd-example-workspaces) demonstrates how each of these considerations can be addressed using a combination of Prefect's and GitHub's capabilities.

### Deploying to multiple workspaces

When changes are pushed, which deployment processes run is selected automatically based on two conditions:

```
on:
  push:
    branches:
      - stg
      - main
    paths:
      - "project_1/**"
```

1. 
**`branches:`** - which branch has changed. This will ultimately select which Prefect workspace a deployment is created or updated in. In this example, changes on the `stg` branch will deploy flows to a staging workspace, and changes on the `main` branch will deploy flows to a production workspace. +2. **`paths:`** - which project folders' files have changed. Since each project folder contains its own flows, dependencies, and `prefect.yaml`, it represents a complete set of logic and configuration that can be deployed independently. Each project in this repository gets its own GitHub Actions workflow YAML file. + +The `prefect.yaml` file in each project folder depends on environment variables that are dictated by the selected job in each CI/CD workflow, enabling external code storage for Prefect deployments that is clearly separated across projects and environments. + +``` + . + ├── cicd-example-workspaces-prod # production bucket + │ ├── project_1 + │ └── project_2 + └── cicd-example-workspaces-stg # staging bucket + ├── project_1 + └── project_2 + +``` + + +Since the deployments in this example use S3 for code storage, it's important that push steps place flow files in separate locations depending upon their respective environment and project so no deployment overwrites another deployment's files. + +### Caching build dependencies + +Since building Docker images and installing Python dependencies are essential parts of the deployment process, it's useful to rely on caching to skip repeated build steps. + +The `setup-python` action offers [caching options](https://github.com/actions/setup-python#caching-packages-dependencies) so Python packages do not have to be downloaded on repeat workflow runs. + +``` +- name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" + +``` + + +``` +Using cached prefect-2.16.1-py3-none-any.whl (2.9 MB) +Using cached prefect_aws-0.4.10-py3-none-any.whl (61 kB) + +``` + + +The `build-push-action` for building Docker images also offers [caching options for GitHub Actions](https://docs.docker.com/build/cache/backends/gha/). If you are not using GitHub, other remote [cache backends](https://docs.docker.com/build/cache/backends/) are available as well. + +``` +- name: Build and push + id: build-docker-image + env: + GITHUB_SHA: ${{ steps.get-commit-hash.outputs.COMMIT_HASH }} + uses: docker/build-push-action@v5 + with: + context: ${{ env.PROJECT_NAME }}/ + push: true + tags: ${{ secrets.DOCKER_USERNAME }}/${{ env.PROJECT_NAME }}:${{ env.GITHUB_SHA }}-stg + cache-from: type=gha + cache-to: type=gha,mode=max + +``` + + +``` +importing cache manifest from gha:*** +DONE 0.1s + +[internal] load build context +transferring context: 70B done +DONE 0.0s + +[2/3] COPY requirements.txt requirements.txt +CACHED + +[3/3] RUN pip install -r requirements.txt +CACHED + +``` + + +Prefect GitHub Actions +------------------------------------------------------------------- + +Prefect provides its own GitHub Actions for [authentication](https://github.com/PrefectHQ/actions-prefect-auth) and [deployment creation](https://github.com/PrefectHQ/actions-prefect-deploy). These actions can simplify deploying with CI/CD when using `prefect.yaml`, especially in cases where a repository contains flows that are used in multiple deployments across multiple Prefect Cloud workspaces. 
+ +Here's an example of integrating these actions into the workflow we created above: + +``` +name: Deploy Prefect flow + +on: + push: + branches: + - main + +jobs: + deploy: + name: Deploy + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Prefect Auth + uses: PrefectHQ/actions-prefect-auth@v1 + with: + prefect-api-key: ${{ secrets.PREFECT_API_KEY }} + prefect-workspace: ${{ secrets.PREFECT_WORKSPACE }} + + - name: Run Prefect Deploy + uses: PrefectHQ/actions-prefect-deploy@v3 + with: + deployment-names: my-deployment + requirements-file-paths: requirements.txt + +``` + + +Authenticating to other Docker image registries +--------------------------------------------------------------------------------------------------------------------- + +The `docker/login-action` GitHub Action supports pushing images to a wide variety of image registries. + +For example, if you are storing Docker images in AWS Elastic Container Registry, you can add your ECR registry URL to the `registry` key in the `with:` part of the action and use an `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` as your `username` and `password`. + +``` +- name: Login to ECR + uses: docker/login-action@v3 + with: + registry: .dkr.ecr..amazonaws.com + username: ${{ secrets.AWS_ACCESS_KEY_ID }} + password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + +``` + + +Other resources +----------------------------------------------------- + +Check out the [Prefect Cloud Terraform provider](https://registry.terraform.io/providers/PrefectHQ/prefect/latest/docs/guides/getting-started) if you're using Terraform to manage your infrastructure. \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/development/global-concurrency-limits.mdx b/docs/2.19.x/how-to-guides/development/global-concurrency-limits.mdx new file mode 100644 index 000000000000..9c841622f62f --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/global-concurrency-limits.mdx @@ -0,0 +1,358 @@ +--- +title: Global Concurrency Limits and Rate Limits +sidebarTitle: Global Concurrency Limits +description: Global concurrency limits allow you to manage execution efficiently, controlling how many tasks, flows, or other operations can run simultaneously. They are ideal when optimizing resource usage, preventing bottlenecks, and customizing task execution are priorities. +--- + +**Clarification on use of the term 'tasks'** + +In the context of global concurrency and rate limits, "tasks" refers not specifically to Prefect tasks, but to concurrent units of work in general, such as those managed by an event loop or `TaskGroup` in asynchronous programming. These general "tasks" could include Prefect tasks when they are part of an asynchronous execution environment. + + +Rate Limits ensure system stability by governing the frequency of requests or operations. They are suitable for preventing overuse, ensuring fairness, and handling errors gracefully. + +When selecting between Concurrency and Rate Limits, consider your primary goal. Choose Concurrency Limits for resource optimization and task management. Choose Rate Limits to maintain system stability and fair access to services. + +The core difference between a rate limit and a concurrency limit is the way in which slots are released. 
With a rate limit, slots are released at a rate controlled by `slot_decay_per_second`, whereas with a concurrency limit, slots are released when the concurrency manager is exited.

Managing global concurrency limits and rate limits
---------------------------------------------------------------------------------------------------------------------------

### Active vs inactive limits

Global concurrency limits can be in either an `active` or `inactive` state.

* **Active**: In this state, slots can be occupied, and code execution will be blocked when slots are unable to be acquired.
* **Inactive**: In this state, slots will not be occupied, and code execution will not be blocked. Concurrency enforcement occurs only when you activate the limit.

### Slot decay

Global concurrency limits can be configured with slot decay. This is used when the concurrency limit serves as a rate limit, and it governs the pace at which slots are released or become available for reuse after being occupied. These slots effectively represent the concurrency capacity within a specific concurrency limit. The concept is best understood as the rate at which these slots "decay" or refresh.

To configure slot decay, set the `slot_decay_per_second` parameter when defining or adjusting a concurrency limit.

For practical use, consider the following:

* _Higher values_: Setting `slot_decay_per_second` to a higher value, such as 5.0, results in slots becoming available relatively quickly. In this scenario, a slot that was occupied by a task will free up after just `0.2` (`1.0 / 5.0`) seconds.

* _Lower values_: Conversely, setting `slot_decay_per_second` to a lower value, like 0.1, causes slots to become available more slowly. In this scenario it would take `10` (`1.0 / 0.1`) seconds for a slot to become available again after occupancy.


Slot decay provides fine-grained control over the availability of slots, enabling you to optimize the rate of your workflow based on your specific requirements.

### Via the UI

You can create, read, edit, and delete concurrency limits via the Prefect UI.

When creating a concurrency limit, you can specify the following parameters:

* **Name**: The name of the concurrency limit. This name is also how you'll reference the concurrency limit in your code. Special characters, such as `/`, `%`, `&`, `>`, `<`, are not allowed.
* **Concurrency Limit**: The maximum number of slots that can be occupied on this concurrency limit.
* **Slot Decay Per Second**: Controls the rate at which slots are released when the concurrency limit is used as a rate limit. This value must be configured when using the `rate_limit` function.
* **Active**: Whether or not the concurrency limit is in an active state.

### Via the CLI

You can create, read, edit, and delete global concurrency limits via the Prefect CLI.

To create a new concurrency limit, use the `prefect gcl create` command. You must specify a `--limit` argument, and can optionally specify `--slot-decay-per-second` and `--disable` arguments.

```
prefect gcl create my-concurrency-limit --limit 5 --slot-decay-per-second 1.0
```

You can inspect the details of a concurrency limit using the `prefect gcl inspect` command:

```
prefect gcl inspect my-concurrency-limit
```

To update a concurrency limit, use the `prefect gcl update` command.
You can update the `--limit`, `--slot-decay-per-second`, `--enable`, and `--disable` arguments:

```
prefect gcl update my-concurrency-limit --limit 10
```

```
prefect gcl update my-concurrency-limit --disable
```

To delete a concurrency limit, use the `prefect gcl delete` command:

```
prefect gcl delete my-concurrency-limit
Are you sure you want to delete global concurrency limit 'my-concurrency-limit'? [y/N]: y
Deleted global concurrency limit with name 'my-concurrency-limit'.
```

See all available commands and options by running `prefect gcl --help`.

Using the `concurrency` context manager
---------------------------------------------------------------------------------------------------

The `concurrency` context manager allows control over the maximum number of concurrent operations. You can select either the synchronous (`sync`) or asynchronous (`async`) version, depending on your use case. Here's how to use it:


**Concurrency limits are implicitly created**

When using the `concurrency` context manager, the concurrency limit you use will be created, in an inactive state, if it does not already exist.


**Sync**

```python
from prefect import flow, task
from prefect.concurrency.sync import concurrency


@task
def process_data(x, y):
    with concurrency("database", occupy=1):
        return x + y


@flow
def my_flow():
    for x, y in [(1, 2), (2, 3), (3, 4), (4, 5)]:
        process_data.submit(x, y)


if __name__ == "__main__":
    my_flow()
```

**Async**

```python
import asyncio
from prefect import flow, task
from prefect.concurrency.asyncio import concurrency


@task
async def process_data(x, y):
    async with concurrency("database", occupy=1):
        return x + y


@flow
async def my_flow():
    for x, y in [(1, 2), (2, 3), (3, 4), (4, 5)]:
        await process_data.submit(x, y)


if __name__ == "__main__":
    asyncio.run(my_flow())
```

1. The code imports the necessary modules and the concurrency context manager. Use the `prefect.concurrency.sync` module for sync usage and the `prefect.concurrency.asyncio` module for async usage.
2. It defines a `process_data` task, taking `x` and `y` as input arguments. Inside this task, the concurrency context manager controls concurrency, using the `database` concurrency limit and occupying one slot. If another task attempts to run with the same limit and no slots are available, that task will be blocked until a slot becomes available.
3. A flow named `my_flow` is defined. Within this flow, it iterates through a list of tuples, each containing pairs of x and y values. For each pair, the `process_data` task is submitted with the corresponding x and y values for processing.

Using `rate_limit`
---------------------------------------------------------

The Rate Limit feature provides control over the frequency of requests or operations, ensuring responsible usage and system stability. Depending on your requirements, you can utilize `rate_limit` to govern both synchronous (sync) and asynchronous (async) operations. Here's how to make the most of it:

**Slot decay**

When using the `rate_limit` function, the concurrency limit you use must have a slot decay configured.
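For example, the `rate-limited-api` limit used in the examples below could be created ahead of time with slot decay via the CLI shown earlier (the limit value here is an illustrative assumption):

```
prefect gcl create rate-limited-api --limit 10 --slot-decay-per-second 1.0
```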
**Sync**

```python
from prefect import flow, task
from prefect.concurrency.sync import rate_limit


@task
def make_http_request():
    rate_limit("rate-limited-api")
    print("Making an HTTP request...")


@flow
def my_flow():
    for _ in range(10):
        make_http_request.submit()


if __name__ == "__main__":
    my_flow()
```

**Async**

```python
import asyncio

from prefect import flow, task
from prefect.concurrency.asyncio import rate_limit


@task
async def make_http_request():
    await rate_limit("rate-limited-api")
    print("Making an HTTP request...")


@flow
async def my_flow():
    for _ in range(10):
        await make_http_request.submit()


if __name__ == "__main__":
    asyncio.run(my_flow())
```

1. The code imports the necessary modules and the `rate_limit` function. Use the `prefect.concurrency.sync` module for sync usage and the `prefect.concurrency.asyncio` module for async usage.
2. It defines a `make_http_request` task. Inside this task, the `rate_limit` function is used to ensure that the requests are made at a controlled pace.
3. A flow named `my_flow` is defined. Within this flow the `make_http_request` task is submitted 10 times.

Using `concurrency` and `rate_limit` outside of a flow
-------------------------------------------------------------------------------------------------------------------------------

`concurrency` and `rate_limit` can be used outside of a flow to control concurrency and rate limits for any operation.

```python
import asyncio

from prefect.concurrency.asyncio import rate_limit


async def main():
    for _ in range(10):
        await rate_limit("rate-limited-api")
        print("Making an HTTP request...")


if __name__ == "__main__":
    asyncio.run(main())
```

Use cases
-----------------------------------------

### Throttling task submission

Throttling task submission helps avoid overloading resources, comply with external rate limits, and ensure a steady, controlled flow of work.

In this scenario the `rate_limit` function is used to throttle the submission of tasks. The rate limit acts as a bottleneck, ensuring that tasks are submitted at a controlled rate, governed by the `slot_decay_per_second` setting on the associated concurrency limit.

```python
from prefect import flow, task
from prefect.concurrency.sync import rate_limit


@task
def my_task(i):
    return i


@flow
def my_flow():
    for _ in range(100):
        rate_limit("slow-my-flow", occupy=1)
        my_task.submit(1)


if __name__ == "__main__":
    my_flow()
```

### Managing database connections

Managing the maximum number of concurrent database connections helps avoid exhausting database resources.

In this scenario we've set up a concurrency limit named `database` and given it a maximum concurrency limit that matches the maximum number of database connections we want to allow. We then use the `concurrency` context manager to control the number of database connections allowed at any one time.

```python
from prefect import flow, task
from prefect.concurrency.sync import concurrency
import psycopg2

@task
def database_query(query):
    # Here we request a single slot on the 'database' concurrency limit. This
    # will block in the case that all of the database connections are in use
    # ensuring that we never exceed the maximum number of database connections.
    with concurrency("database", occupy=1):
        connection = psycopg2.connect("")
        cursor = connection.cursor()
        cursor.execute(query)
        result = cursor.fetchall()
        connection.close()
        return result

@flow
def my_flow():
    queries = ["SELECT * FROM table1", "SELECT * FROM table2", "SELECT * FROM table3"]

    for query in queries:
        database_query.submit(query)

if __name__ == "__main__":
    my_flow()
```

### Parallel data processing

Limit the maximum number of parallel processing tasks.

In this scenario we want to limit the number of `process_data` tasks to five at any one time. We do this by using the `concurrency` context manager to request five slots on the `data-processing` concurrency limit. This blocks until five slots are free, then submits five more tasks, ensuring that we never exceed the maximum number of parallel processing tasks.

```python
import asyncio
from prefect.concurrency.sync import concurrency


async def process_data(data):
    print(f"Processing: {data}")
    await asyncio.sleep(1)
    return f"Processed: {data}"


async def main():
    data_items = list(range(100))
    processed_data = []

    while data_items:
        with concurrency("data-processing", occupy=5):
            chunk = [data_items.pop() for _ in range(5)]
            processed_data += await asyncio.gather(
                *[process_data(item) for item in chunk]
            )

    print(processed_data)


if __name__ == "__main__":
    asyncio.run(main())
```
 diff --git a/docs/2.19.x/how-to-guides/development/hosting.mdx b/docs/2.19.x/how-to-guides/development/hosting.mdx new file mode 100644 index 000000000000..55a41a670a64 --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/hosting.mdx @@ -0,0 +1,290 @@ +--- +title: Host a Prefect server instance +sidebarTitle: Hosting +---

Host a Prefect server instance
-----------------------------------------------------------------------------------

Learn how to host your own Prefect server instance.

**Note**

If you would like to host a Prefect server instance on Kubernetes, check out the prefect-server [Helm chart](https://github.com/PrefectHQ/prefect-helm/tree/main/charts/prefect-server).


After installing Prefect, you have:

* a Python SDK client that can communicate with [Prefect Cloud](https://app.prefect.cloud/)
* an [API server](https://docs.prefect.io/api-ref/) instance backed by a database and a UI

### Steps

1. Spin up a local Prefect server UI with the `prefect server start` CLI command in the terminal:

```
prefect server start
```
2. Open the URL for the Prefect server UI ([http://127.0.0.1:4200](http://127.0.0.1:4200/) by default) in a browser.

![Viewing the dashboard in the Prefect UI.](/images/hosting1.png)

3. Shut down the Prefect server with ctrl + c in the terminal.

### Comparing a self-hosted Prefect server instance and Prefect Cloud

A self-hosted Prefect server instance and Prefect Cloud share a common set of features. Prefect Cloud includes the following additional features:

* [Workspaces](https://docs.prefect.io/cloud/workspaces/) — isolated environments to organize your flows, deployments, and flow runs.
* [Automations](https://docs.prefect.io/cloud/automations/) — configure triggers, actions, and notifications in response to real-time monitoring events.
* [Email notifications](https://docs.prefect.io/cloud/automations/) — send email alerts from Prefect's servers based on automation triggers.
* [Service accounts](https://docs.prefect.io/cloud/users/service-accounts/) — configure API access for running workers or executing flow runs on remote infrastructure.
* [Custom role-based access controls (RBAC)](https://docs.prefect.io/cloud/users/roles/) — assign users granular permissions to perform activities within an account or workspace.
* [Single Sign-on (SSO)](https://docs.prefect.io/cloud/users/sso/) — authentication using your identity provider.
* [Audit Log](https://docs.prefect.io/cloud/users/audit-log/) — a record of user activities to monitor security and compliance.

Read more about Prefect Cloud in the [Cloud](https://docs.prefect.io/cloud/) section.

### Configure a Prefect server instance

Go to your terminal session and run this command to set the API URL to point to a Prefect server instance:

```
prefect config set PREFECT_API_URL="http://127.0.0.1:4200/api"
```

### `PREFECT_API_URL` required when running Prefect inside a container

You must set the API server address to use Prefect within a container, such as a Docker container.

You can save the API server address in a [Prefect profile](https://docs.prefect.io/concepts/settings/). Whenever that profile is active, the API endpoint is at that address.

See [Profiles & Configuration](https://docs.prefect.io/concepts/settings/) for more information on profiles and configurable Prefect settings.


The Prefect database
---------------------------------------------------------------

The Prefect database persists data to track the state of your flow runs and related Prefect concepts, including:

* Flow run and task run state
* Run history
* Logs
* Deployments
* Flow and task run concurrency limits
* Storage blocks for flow and task results
* Variables
* Artifacts
* Work pool status

Currently Prefect supports the following databases:

* SQLite (default in Prefect): Recommended for lightweight, single-server deployments. SQLite requires essentially no setup.
* PostgreSQL: Best for connecting to external databases, but requires additional setup (such as Docker). Prefect uses the [`pg_trgm`](https://www.postgresql.org/docs/current/pgtrgm.html) extension, so it must be installed and enabled.

### Using the database

A local SQLite database is the default database and is configured upon Prefect installation. The database is located at `~/.prefect/prefect.db` by default.

To reset your database, run the CLI command:

```
prefect server database reset -y
```

This command clears all data and reapplies the schema.

### Database settings

Prefect provides several settings for configuring the database. The default settings are:

```
PREFECT_API_DATABASE_CONNECTION_URL='sqlite+aiosqlite:///${PREFECT_HOME}/prefect.db'
PREFECT_API_DATABASE_ECHO='False'
PREFECT_API_DATABASE_MIGRATE_ON_START='True'
PREFECT_API_DATABASE_PASSWORD='None'
```

Save a setting to your active Prefect profile with `prefect config set`.
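For example, to override one of the defaults listed above, such as disabling automatic migrations on server start, you could run:

```
prefect config set PREFECT_API_DATABASE_MIGRATE_ON_START=False
```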

### Configure a PostgreSQL database

Connect Prefect to a PostgreSQL database by setting the connection URL with the following command:

```
prefect config set PREFECT_API_DATABASE_CONNECTION_URL="postgresql+asyncpg://postgres:yourTopSecretPassword@localhost:5432/prefect"
```

The above connection string assumes:

* You have a username called `postgres`
* Your password is set to `yourTopSecretPassword`
* Your database runs on the same host as the Prefect server instance, `localhost`
* You use the default PostgreSQL port `5432`
* Your PostgreSQL instance has a database called `prefect`

#### Quickstart: configure a PostgreSQL database with Docker

Quickly start a PostgreSQL instance to use as your Prefect database with the following command (which will start a Docker container running PostgreSQL):

```
docker run -d --name prefect-postgres -v prefectdb:/var/lib/postgresql/data -p 5432:5432 -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=yourTopSecretPassword -e POSTGRES_DB=prefect postgres:latest
```

The above command:

* Pulls the [latest](https://hub.docker.com/_/postgres?tab=tags) version of the official `postgres` Docker image, which is compatible with Prefect.
* Starts a container with the name `prefect-postgres`.
* Creates a database `prefect` with a user `postgres` and `yourTopSecretPassword` password.
* Mounts the PostgreSQL data to a Docker volume called `prefectdb` to provide persistence if you ever have to restart or rebuild that container.

Run the command below to point your current Prefect profile at the PostgreSQL instance running in your Docker container:

```
prefect config set PREFECT_API_DATABASE_CONNECTION_URL="postgresql+asyncpg://postgres:yourTopSecretPassword@localhost:5432/prefect"
```

### Confirm your PostgreSQL database configuration

Inspect your Prefect profile to confirm that the setting has been properly applied:

```
prefect config view --show-sources
```

You should see output similar to the following:

```
PREFECT_PROFILE='my_profile'
PREFECT_API_DATABASE_CONNECTION_URL='********' (from profile)
PREFECT_API_URL='http://127.0.0.1:4200/api' (from profile)
```

Start the Prefect server to use your PostgreSQL database instance:

```
prefect server start
```

### In-memory database

To use an in-memory SQLite database, run the following command:

```
prefect config set PREFECT_API_DATABASE_CONNECTION_URL="sqlite+aiosqlite:///file::memory:?cache=shared&uri=true&check_same_thread=false"
```

**Use SQLite database for testing only**

SQLite is only supported by Prefect for testing purposes and is not compatible with multiprocessing.


### Migrations

Prefect uses [Alembic](https://alembic.sqlalchemy.org/en/latest/) to manage database migrations. Alembic is a database migration tool to use with the SQLAlchemy Database Toolkit for Python. Alembic provides a framework for generating and applying schema changes to a database.

Apply migrations to your database with the following commands:

To upgrade:

```
prefect server database upgrade -y
```

To downgrade:

```
prefect server database downgrade -y
```

Use the `-r` flag to specify a specific migration version to upgrade or downgrade to.
For example, to downgrade to the previous migration version, run:

```
prefect server database downgrade -y -r -1
```

or to downgrade to a specific revision:

```
prefect server database downgrade -y -r d20618ce678e
```

To downgrade all migrations, use the `base` revision.

See the [contributing docs](https://docs.prefect.io/contributing/overview/#adding-database-migrations) to create new database migrations.

Notifications
-------------------------------------------------

[Prefect Cloud](https://docs.prefect.io/cloud/) gives you access to a hosted platform with Workspace & User controls, Events, and Automations. Prefect Cloud has an option for automation notifications. The more limited Notifications option is provided for the self-hosted Prefect server.

Notifications enable you to set up alerts that are sent when a flow enters any state you specify. When your flow and task runs change [state](https://docs.prefect.io/concepts/states/), Prefect notes the state change and checks whether the new state matches any notification policies. If it does, a new notification is queued.

Prefect supports sending notifications through:

* Custom webhook
* Discord webhook
* Mattermost webhook
* Microsoft Teams webhook
* Opsgenie webhook
* PagerDuty webhook
* Sendgrid email
* Slack webhook
* Twilio SMS


**Notifications in Prefect Cloud**

Prefect Cloud uses the robust [Automations](https://docs.prefect.io/cloud/automations/) interface to enable notifications related to flow run state changes and work pool status.


### Configure notifications

To configure a notification in a Prefect server, go to the **Notifications** page and select **Create Notification** or the **+** button.

![Creating a notification in the Prefect UI](/images/hosting2.png)

You can choose:

* Which run states should trigger a notification
* Tags to filter which flow runs are covered by the notification
* Whether to send an email, a Slack message, a Microsoft Teams message, or use another service

For email notifications (supported on Prefect Cloud only), the configuration requires email addresses to which the message is sent.

For Slack notifications, the configuration requires webhook credentials for your Slack and the channel to which the message is sent.

For example, to get a Slack message if a flow with a `daily-etl` tag fails, the notification will read:

> If a run of any flow with **daily-etl** tag enters a **failed** state, send a notification to **my-slack-webhook**

When the conditions of the notification are triggered, you’ll receive a message:

> The **fuzzy-leopard** run of the **daily-etl** flow entered a **failed** state at **22-06-27 16:21:37 EST**.

On the **Notifications** page you can pause, edit, or delete any configured notification.

![Viewing all configured notifications in the Prefect UI](/images/hosting3.png) \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/development/interactive-workflows.mdx b/docs/2.19.x/how-to-guides/development/interactive-workflows.mdx new file mode 100644 index 000000000000..5ee8f7014488 --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/interactive-workflows.mdx @@ -0,0 +1,758 @@ +--- +title: Creating Interactive Workflows +sidebarTitle: Interactive Workflows +---

Flows can pause or suspend execution and automatically resume when they receive type-checked input in Prefect's UI.
Flows can also send and receive type-checked input at any time while running, without pausing or suspending. This guide will show you how to use these features to build _interactive workflows_.

**A note on async Python syntax**

Most of the example code in this section uses async Python functions and `await`. However, as with other Prefect features, you can call these functions with or without `await`.


Pausing or suspending a flow until it receives input
-------------------------------------------------------------------------------------------------------------------------------

You can pause or suspend a flow until it receives input from a user in Prefect's UI. This is useful when you need to ask for additional information or feedback before resuming a flow. Such workflows are often called [human-in-the-loop](https://hai.stanford.edu/news/humans-loop-design-interactive-ai-systems) (HITL) systems.

**What is human-in-the-loop interactivity used for?**

Approval workflows that pause to ask a human to confirm whether a workflow should continue are very common in the business world. Certain types of [machine learning training](https://link.springer.com/article/10.1007/s10462-022-10246-w) and artificial intelligence workflows benefit from incorporating HITL design.

### Waiting for input

To receive input while paused or suspended, use the `wait_for_input` parameter in the `pause_flow_run` or `suspend_flow_run` functions. This parameter accepts one of the following:

* A built-in type like `int` or `str`, or a built-in collection like `List[int]`
* A `pydantic.BaseModel` subclass
* A subclass of `prefect.input.RunInput`


**When to use a `RunInput` or `BaseModel` instead of a built-in type**

There are a few reasons to use a `RunInput` or `BaseModel`. The first is that when you let Prefect automatically create one of these classes for your input type, the field that users will see in Prefect's UI when they click "Resume" on a flow run is named `value` and has no help text to suggest what the field is. If you create a `RunInput` or `BaseModel`, you can change details like the field name, help text, and default value, and users will see those reflected in the "Resume" form.



The simplest way to pause or suspend and wait for input is to pass a built-in type:

```python
from prefect import flow, pause_flow_run, get_run_logger

@flow
def greet_user():
    logger = get_run_logger()

    user = pause_flow_run(wait_for_input=str)

    logger.info(f"Hello, {user}!")
```

In this example, the flow run will pause until a user clicks the Resume button in the Prefect UI, enters a name, and submits the form.

**What types can you pass for `wait_for_input`?**

When you pass a built-in type such as `int` as an argument for the `wait_for_input` parameter to `pause_flow_run` or `suspend_flow_run`, Prefect automatically creates a Pydantic model containing one field annotated with the type you specified. This means you can use [any type annotation that Pydantic accepts for model fields](https://docs.pydantic.dev/1.10/usage/types/) with these functions.



Instead of a built-in type, you can pass in a `pydantic.BaseModel` class.
This is useful if you already have a `BaseModel` you want to use:

```python
from prefect import flow, pause_flow_run, get_run_logger
from pydantic import BaseModel


class User(BaseModel):
    name: str
    age: int


@flow
async def greet_user():
    logger = get_run_logger()

    user = await pause_flow_run(wait_for_input=User)

    logger.info(f"Hello, {user.name}!")
```

**`BaseModel` classes are upgraded to `RunInput` classes automatically**

When you pass a `pydantic.BaseModel` class as the `wait_for_input` argument to `pause_flow_run` or `suspend_flow_run`, Prefect automatically creates a `RunInput` class with the same behavior as your `BaseModel` and uses that instead.


`RunInput` classes contain extra logic that allows flows to send and receive them at runtime. You shouldn't notice any difference!

Finally, for advanced use cases like overriding how Prefect stores flow run inputs, you can create a `RunInput` class:

```python
from prefect import flow, get_run_logger, pause_flow_run
from prefect.input import RunInput

class UserInput(RunInput):
    name: str
    age: int

    # Imagine overridden methods here!
    def override_something(self, *args, **kwargs):
        super().override_something(*args, **kwargs)

@flow
async def greet_user():
    logger = get_run_logger()

    user = await pause_flow_run(wait_for_input=UserInput)

    logger.info(f"Hello, {user.name}!")
```

### Providing initial data

You can set default values for fields in your model by using the `with_initial_data` method. This is useful when you want to provide default values for the fields in your own `RunInput` class.

Expanding on the example above, you could make the `name` field default to "anonymous":

```python
from prefect import flow, get_run_logger, pause_flow_run
from prefect.input import RunInput

class UserInput(RunInput):
    name: str
    age: int

@flow
async def greet_user():
    logger = get_run_logger()

    user_input = await pause_flow_run(
        wait_for_input=UserInput.with_initial_data(name="anonymous")
    )

    if user_input.name == "anonymous":
        logger.info("Hello, stranger!")
    else:
        logger.info(f"Hello, {user_input.name}!")
```

When a user sees the form for this input, the name field will contain "anonymous" as the default.

### Providing a description with runtime data

You can provide a dynamic, markdown description that will appear in the Prefect UI when the flow run pauses. This feature enables context-specific prompts, enhancing clarity and user interaction. Building on the example above:

```python
from datetime import datetime
from prefect import flow, pause_flow_run, get_run_logger
from prefect.input import RunInput


class UserInput(RunInput):
    name: str
    age: int


@flow
async def greet_user():
    logger = get_run_logger()
    current_date = datetime.now().strftime("%B %d, %Y")

    description_md = f"""
**Welcome to the User Greeting Flow!**
Today's Date: {current_date}

Please enter your details below:
- **Name**: What should we call you?
- **Age**: Just a number, nothing more.
"""

    user_input = await pause_flow_run(
        wait_for_input=UserInput.with_initial_data(
            description=description_md, name="anonymous"
        )
    )

    if user_input.name == "anonymous":
        logger.info("Hello, stranger!")
    else:
        logger.info(f"Hello, {user_input.name}!")
```

When a user sees the form for this input, the given markdown will appear above the input fields.
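The same `wait_for_input` pattern works with `suspend_flow_run`, which tears down the flow run's infrastructure while it waits. Here is a minimal sketch, assuming a hypothetical `ApprovalInput` model:

```python
from prefect import flow, get_run_logger, suspend_flow_run
from prefect.input import RunInput


class ApprovalInput(RunInput):
    # Hypothetical example field; rename to suit your workflow
    approved: bool


@flow
async def requires_approval():
    logger = get_run_logger()

    # The flow run suspends (releasing its infrastructure) until a user
    # submits the form in the Prefect UI
    user_input = await suspend_flow_run(wait_for_input=ApprovalInput)

    logger.info(f"Approved: {user_input.approved}")
```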

### Handling custom validation

Prefect uses the fields and type hints on your `RunInput` or `BaseModel` class to validate the general structure of input your flow receives, but you might require more complex validation. If you do, you can use Pydantic [validators](https://docs.pydantic.dev/1.10/usage/validators/).

**Custom validation runs after the flow resumes**

Prefect transforms the type annotations in your `RunInput` or `BaseModel` class to a JSON schema and uses that schema in the UI for client-side validation. However, custom validation requires running _Python_ logic defined in your `RunInput` class. Because of this, validation happens _after the flow resumes_, so you'll want to handle it explicitly in your flow. Continue reading for an example best practice.


The following is an example `RunInput` class that uses a custom field validator:

```python
from typing import Literal

import pydantic
from prefect.input import RunInput


class ShirtOrder(RunInput):
    size: Literal["small", "medium", "large", "xlarge"]
    color: Literal["red", "green", "black"]

    @pydantic.validator("color")
    def validate_color(cls, value, values, **kwargs):
        if value == "green" and values["size"] == "small":
            raise ValueError(
                "Green is only in-stock for medium, large, and XL sizes."
            )

        return value
```

In the example, we use Pydantic's `validator` decorator to define a custom validation method for the `color` field. We can use it in a flow like this:

```python
from typing import Literal

import pydantic
from prefect import flow, pause_flow_run
from prefect.input import RunInput


class ShirtOrder(RunInput):
    size: Literal["small", "medium", "large", "xlarge"]
    color: Literal["red", "green", "black"]

    @pydantic.validator("color")
    def validate_color(cls, value, values, **kwargs):
        if value == "green" and values["size"] == "small":
            raise ValueError(
                "Green is only in-stock for medium, large, and XL sizes."
            )

        return value


@flow
def get_shirt_order():
    shirt_order = pause_flow_run(wait_for_input=ShirtOrder)
```

If a user chooses any size and color combination other than `small` and `green`, the flow run will resume successfully. However, if the user chooses size `small` and color `green`, the flow run will resume, and `pause_flow_run` will raise a `ValidationError` exception. This will cause the flow run to fail and log the error.

However, what if you don't want the flow run to fail? One way to handle this case is to use a `while` loop and pause again if the `ValidationError` exception is raised:

```python
from typing import Literal

import pydantic
from prefect import flow, get_run_logger, pause_flow_run
from prefect.input import RunInput


class ShirtOrder(RunInput):
    size: Literal["small", "medium", "large", "xlarge"]
    color: Literal["red", "green", "black"]

    @pydantic.validator("color")
    def validate_color(cls, value, values, **kwargs):
        if value == "green" and values["size"] == "small":
            raise ValueError(
                "Green is only in-stock for medium, large, and XL sizes."
            )

        return value


@flow
def get_shirt_order():
    logger = get_run_logger()
    shirt_order = None

    while shirt_order is None:
        try:
            shirt_order = pause_flow_run(wait_for_input=ShirtOrder)
        except pydantic.ValidationError as exc:
            logger.error(f"Invalid size and color combination: {exc}")

    logger.info(
        f"Shirt order: {shirt_order.size}, {shirt_order.color}"
    )
```

This code will cause the flow run to continually pause until the user enters a valid size and color combination.

As an additional step, you may want to use an [automation](https://docs.prefect.io/concepts/automations) or [notification](https://docs.prefect.io/concepts/notifications/) to alert the user to the error.

Sending and receiving input at runtime
---------------------------------------------------------------------------------------------------

Use the `send_input` and `receive_input` functions to send input to a flow or receive input from a flow at runtime. You don't need to pause or suspend the flow to send or receive input.

**Why would you send or receive input without pausing or suspending?**

You might want to send or receive input without pausing or suspending in scenarios where the flow run is designed to handle real-time data. For instance, in a live monitoring system, you might need to update certain parameters based on the incoming data without interrupting the flow. Another use is having a long-running flow that continually responds to runtime input with low latency. For example, if you're building a chatbot, you could have a flow that starts a GPT Assistant and manages a conversation thread.

The most important parameter to the `send_input` and `receive_input` functions is `run_input`, which should be one of the following:

* A built-in type such as `int` or `str`
* A `pydantic.BaseModel` class
* A `prefect.input.RunInput` class


**When to use a `BaseModel` or `RunInput` instead of a built-in type**

Most built-in types and collections of built-in types should work with `send_input` and `receive_input`, but there is a caveat with nested collection types, such as lists of tuples, e.g. `List[Tuple[str, float]]`. In this case, validation may happen after your flow receives the data, so calling `receive_input` may raise a `ValidationError`. You can catch this exception, but also consider placing the field in an explicit `BaseModel` or `RunInput` so that your flow only receives exact type matches.

Let's look at some examples! We'll check out `receive_input` first, followed by `send_input`, and then we'll see the two functions working together.

### Receiving input

The following flow uses `receive_input` to continually receive names and print a personalized greeting for each name it receives:

```python
from prefect import flow
from prefect.input.run_input import receive_input


@flow
async def greeter_flow():
    async for name_input in receive_input(str, timeout=None):
        # Prints "Hello, andrew!" if another flow sent "andrew"
        print(f"Hello, {name_input}!")
```

When you pass a type such as `str` into `receive_input`, Prefect creates a `RunInput` class to manage your input automatically. When a flow sends input of this type, Prefect uses the `RunInput` class to validate the input. If the validation succeeds, your flow receives the input in the type you specified. In this example, if the flow received a valid string as input, the variable `name_input` would contain the string value.

If, instead, you pass a `BaseModel`, Prefect upgrades your `BaseModel` to a `RunInput` class, and the variable your flow sees — in this case, `name_input` — is a `RunInput` instance that behaves like a `BaseModel`. Of course, if you pass in a `RunInput` class, no upgrade is needed, and you'll get a `RunInput` instance.

If you prefer to keep things simple and pass types such as `str` into `receive_input`, you can do so.
If you need access to the generated `RunInput` that contains the received value, pass `with_metadata=True` to `receive_input`:

```python
from prefect import flow
from prefect.input.run_input import receive_input


@flow
async def greeter_flow():
    async for name_input in receive_input(
        str,
        timeout=None,
        with_metadata=True
    ):
        # Input will always be in the field "value" on this object.
        print(f"Hello, {name_input.value}!")
```

**Why would you need to use `with_metadata=True`?**

The primary uses of accessing the `RunInput` object for a received input are to respond to the sender with the `RunInput.respond()` function or to access the unique key for an input. Later in this guide, we'll discuss how and why you might use these features.


Notice that we are now printing `name_input.value`. When Prefect generates a `RunInput` for you from a built-in type, the `RunInput` class has a single field, `value`, that uses a type annotation matching the type you specified. So if you call `receive_input` like this: `receive_input(str, with_metadata=True)`, that's equivalent to manually creating the following `RunInput` class and `receive_input` call:

```python
from prefect import flow
from prefect.input.run_input import RunInput, receive_input

class GreeterInput(RunInput):
    value: str

@flow
async def greeter_flow():
    async for name_input in receive_input(GreeterInput, timeout=None):
        print(f"Hello, {name_input.value}!")
```

**The type used in `receive_input` and `send_input` must match**

For a flow to receive input, the sender must use the same type that the receiver is receiving. This means that if the receiver is receiving `GreeterInput`, the sender must send `GreeterInput`. If the receiver is receiving `GreeterInput` and the sender sends `str` input that Prefect automatically upgrades to a `RunInput` class, the types won't match, so the receiving flow run won't receive the input. However, the input will be waiting if the flow ever calls `receive_input(str)`!


### Keeping track of inputs you've already seen

By default, each time you call `receive_input`, you get an iterator that iterates over all known inputs to a specific flow run, starting with the first received. The iterator will keep track of your current position as you iterate over it, or you can call `next()` to explicitly get the next input. If you're using the iterator in a loop, you should probably assign it to a variable:

```python
from prefect import flow, get_client
from prefect.deployments.deployments import run_deployment
from prefect.input.run_input import receive_input, send_input

EXIT_SIGNAL = "__EXIT__"


@flow
async def sender():
    greeter_flow_run = await run_deployment(
        "greeter/send-receive", timeout=0, as_subflow=False
    )
    client = get_client()

    # Assigning the `receive_input` iterator to a variable
    # outside of the `while True` loop allows us to continue
    # iterating over inputs in subsequent passes through the
    # while loop without losing our position.
    receiver = receive_input(
        str,
        with_metadata=True,
        timeout=None,
        poll_interval=0.1
    )

    while True:
        name = input("What is your name? ")
        if not name:
            continue

        if name == "q" or name == "quit":
            await send_input(
                EXIT_SIGNAL,
                flow_run_id=greeter_flow_run.id
            )
            print("Goodbye!")
            break

        await send_input(name, flow_run_id=greeter_flow_run.id)

        # Saving the iterator outside of the while loop and
        # calling next() on each iteration of the loop ensures
        # that we're always getting the newest greeting. If we
        # had instead called `receive_input` here, we would
        # always get the _first_ greeting this flow received,
        # print it, and then ask for a new name.
        greeting = await receiver.next()
        print(greeting)
```

So, an iterator helps to keep track of the inputs your flow has already received. But what if you want your flow to suspend and then resume later, picking up where it left off? In that case, you will need to save the keys of the inputs you've seen so that the flow can read them back out when it resumes. You might use a [Block](https://docs.prefect.io/concepts/blocks/), such as a `JSON` block.

The following flow receives input for 30 seconds then suspends itself, which exits the flow and tears down infrastructure:

```python
from prefect import flow, get_run_logger, suspend_flow_run
from prefect.blocks.system import JSON
from prefect.context import get_run_context
from prefect.input.run_input import receive_input


EXIT_SIGNAL = "__EXIT__"


@flow
async def greeter():
    logger = get_run_logger()
    run_context = get_run_context()
    assert run_context.flow_run, "Could not see my flow run ID"

    block_name = f"{run_context.flow_run.id}-seen-ids"

    try:
        seen_keys_block = await JSON.load(block_name)
    except ValueError:
        seen_keys_block = JSON(
            value=[],
        )

    try:
        async for name_input in receive_input(
            str,
            with_metadata=True,
            poll_interval=0.1,
            timeout=30,
            exclude_keys=seen_keys_block.value
        ):
            if name_input.value == EXIT_SIGNAL:
                print("Goodbye!")
                return
            await name_input.respond(f"Hello, {name_input.value}!")

            seen_keys_block.value.append(name_input.metadata.key)
            await seen_keys_block.save(
                name=block_name,
                overwrite=True
            )
    except TimeoutError:
        logger.info("Suspending greeter after 30 seconds of idle time")
        await suspend_flow_run(timeout=10000)
```

As this flow processes name input, it adds the _key_ of the flow run input to the `seen_keys_block`. When the flow later suspends and then resumes, it reads the keys it has already seen out of the JSON Block and passes them as the `exclude_keys` parameter to `receive_input`.

### Responding to the input's sender

When your flow receives input from another flow, Prefect knows the sending flow run ID, so the receiving flow can respond by calling the `respond` method on the `RunInput` instance the flow received. There are a couple of requirements:

1. You will need to pass in a `BaseModel` or `RunInput`, or use `with_metadata=True`.
2. The flow you are responding to must receive the same type of input you send in order to see it.

The `respond` method is equivalent to calling `send_input(..., flow_run_id=sending_flow_run.id)`, but with `respond`, your flow doesn't need to know the sending flow run's ID.
+
+Now that we know about `respond`, let's make our `greeter_flow` respond to name inputs instead of printing them:
+
+```python
+from prefect import flow
+from prefect.input.run_input import receive_input
+
+
+@flow
+async def greeter():
+    async for name_input in receive_input(
+        str,
+        with_metadata=True,
+        timeout=None
+    ):
+        await name_input.respond(f"Hello, {name_input.value}!")
+
+```
+
+
+Cool! There's one problem left: this flow runs forever! We need a way to signal that it should exit. Let's keep things simple and teach it to look for a special string:
+
+```python
+from prefect import flow
+from prefect.input.run_input import receive_input
+
+
+EXIT_SIGNAL = "__EXIT__"
+
+
+@flow
+async def greeter():
+    async for name_input in receive_input(
+        str,
+        with_metadata=True,
+        poll_interval=0.1,
+        timeout=None
+    ):
+        if name_input.value == EXIT_SIGNAL:
+            print("Goodbye!")
+            return
+        await name_input.respond(f"Hello, {name_input.value}!")
+
+```
+
+
+With a `greeter` flow in place, we're ready to create the flow that sends `greeter` names!
+
+### Sending input
+
+You can send input to a flow with the `send_input` function. This works similarly to `receive_input` and, like that function, accepts the same `run_input` argument, which can be a built-in type such as `str`, or else a `BaseModel` or `RunInput` subclass.
+
+**When can you send input to a flow run?**
+
+You can send input to a flow run as soon as you have the flow run's ID. The flow does not have to be receiving input before you send it. If you send input to a flow before it starts receiving, the flow will see your input when it calls `receive_input` (as long as the types in the `send_input` and `receive_input` calls match!).
+
+Next, we'll create a `sender` flow that starts a `greeter` flow run and then enters a loop, continuously getting input from the terminal and sending it to the greeter flow:
+
+```python
+from prefect import flow, get_client
+from prefect.deployments.deployments import run_deployment
+from prefect.input.run_input import receive_input, send_input
+
+EXIT_SIGNAL = "__EXIT__"
+
+
+@flow
+async def sender():
+    greeter_flow_run = await run_deployment(
+        "greeter/send-receive", timeout=0, as_subflow=False
+    )
+    receiver = receive_input(str, timeout=None, poll_interval=0.1)
+    client = get_client()
+
+    while True:
+        flow_run = await client.read_flow_run(greeter_flow_run.id)
+
+        if not flow_run.state or not flow_run.state.is_running():
+            continue
+
+        name = input("What is your name? ")
+        if not name:
+            continue
+
+        if name == "q" or name == "quit":
+            await send_input(
+                EXIT_SIGNAL,
+                flow_run_id=greeter_flow_run.id
+            )
+            print("Goodbye!")
+            break
+
+        await send_input(name, flow_run_id=greeter_flow_run.id)
+        greeting = await receiver.next()
+        print(greeting)
+
+```
+
+
+There's more going on here than in `greeter`, so let's take a closer look at the pieces.
+
+First, we use `run_deployment` to start a `greeter` flow run. This means we must have a worker or `flow.serve()` running in a separate process. That process will begin running `greeter` while `sender` continues to execute. Calling `run_deployment(..., timeout=0)` ensures that `sender` won't wait for the `greeter` flow run to complete, because it's running a loop and will only exit when we send `EXIT_SIGNAL`.
+
+Next, we capture the iterator returned by `receive_input` as `receiver`. This flow works by entering a loop, and on each iteration of the loop, the flow asks for terminal input, sends that to the `greeter` flow, and then runs `receiver.next()` to wait until it receives the response from `greeter`.
+
+Next, we let the terminal user who ran this flow exit by entering the string `q` or `quit`. When that happens, we send the `greeter` flow an exit signal so it will shut down too.
+
+Finally, we send the new name to `greeter`. We know that `greeter` is going to send back a greeting as a string, so we immediately wait for new string input. When we receive the greeting, we print it and continue the loop that gets terminal input.
+
+### Seeing a complete example
+
+Finally, let's see a complete example of using `send_input` and `receive_input`. Here is what the `greeter` and `sender` flows look like together:
+
+```python
+import asyncio
+import sys
+from prefect import flow, get_client
+from prefect.blocks.system import JSON
+from prefect.context import get_run_context
+from prefect.deployments.deployments import run_deployment
+from prefect.input.run_input import receive_input, send_input
+
+
+EXIT_SIGNAL = "__EXIT__"
+
+
+@flow
+async def greeter():
+    run_context = get_run_context()
+    assert run_context.flow_run, "Could not see my flow run ID"
+
+    block_name = f"{run_context.flow_run.id}-seen-ids"
+
+    try:
+        seen_keys_block = await JSON.load(block_name)
+    except ValueError:
+        seen_keys_block = JSON(
+            value=[],
+        )
+
+    async for name_input in receive_input(
+        str,
+        with_metadata=True,
+        poll_interval=0.1,
+        timeout=None,
+        # Skip any inputs we've already processed, using the keys
+        # saved in the JSON block.
+        exclude_keys=seen_keys_block.value
+    ):
+        if name_input.value == EXIT_SIGNAL:
+            print("Goodbye!")
+            return
+        await name_input.respond(f"Hello, {name_input.value}!")
+
+        seen_keys_block.value.append(name_input.metadata.key)
+        await seen_keys_block.save(
+            name=block_name,
+            overwrite=True
+        )
+
+
+@flow
+async def sender():
+    greeter_flow_run = await run_deployment(
+        "greeter/send-receive", timeout=0, as_subflow=False
+    )
+    receiver = receive_input(str, timeout=None, poll_interval=0.1)
+    client = get_client()
+
+    while True:
+        flow_run = await client.read_flow_run(greeter_flow_run.id)
+
+        if not flow_run.state or not flow_run.state.is_running():
+            continue
+
+        name = input("What is your name? ")
+        if not name:
+            continue
+
+        if name == "q" or name == "quit":
+            await send_input(
+                EXIT_SIGNAL,
+                flow_run_id=greeter_flow_run.id
+            )
+            print("Goodbye!")
+            break
+
+        await send_input(name, flow_run_id=greeter_flow_run.id)
+        greeting = await receiver.next()
+        print(greeting)
+
+
+if __name__ == "__main__":
+    if sys.argv[1] == "greeter":
+        asyncio.run(greeter.serve(name="send-receive"))
+    elif sys.argv[1] == "sender":
+        asyncio.run(sender())
+
+```
+
+
+To run the example, you'll need a Python environment with Prefect installed, pointed at either an open-source Prefect server instance or Prefect Cloud.
+
+With your environment set up, start serving the `greeter` flow in one terminal with the following command:
+
+```
+python my_file_name greeter
+
+```
+
+
+For example, with Prefect Cloud, you should see output like this:
+
+```
+╭──────────────────────────────────────────────────────────────────────────────────────────────────╮
+│ Your flow 'greeter' is being served and polling for scheduled runs!                              │
+│                                                                                                  │
+│ To trigger a run for this flow, use the following command:                                       │
+│                                                                                                  │
+│ $ prefect deployment run 'greeter/send-receive'                                                  │
+│                                                                                                  │
+│ You can also run your flow via the Prefect UI:                                                   │
+│ https://app.prefect.cloud/account/...(a URL for your account)                                    │
+│                                                                                                  │
+╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
+
+```
+
+
+Then start the `sender` flow in another terminal:
+
+```
+python my_file_name sender
+
+```
+
+
+You should see output like this:
+
+```
+11:38:41.800 | INFO    | prefect.engine - Created flow run 'gregarious-owl' for flow 'sender'
+11:38:41.802 | INFO    | Flow run 'gregarious-owl' - View at https://app.prefect.cloud/account/...
+What is your name?
+
+```
+
+
+Type a name and press the enter key to see a greeting, and you'll see sending and receiving in action:
+
+```
+What is your name? andrew
+Hello, andrew!
+
+```
diff --git a/docs/2.19.x/how-to-guides/development/prefect-client.mdx b/docs/2.19.x/how-to-guides/development/prefect-client.mdx
new file mode 100644
index 000000000000..eab03af494c0
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/prefect-client.mdx
@@ -0,0 +1,217 @@
+---
+title: Using the Prefect Orchestration Client
+sidebarTitle: Prefect Client
+---
+
+Overview
+---------------------------------------
+
+In the [API reference for the `PrefectClient`](https://docs.prefect.io/api-ref/prefect/client/orchestration/), you can find many useful client methods that make it simpler to do things such as:
+
+* [reschedule late flow runs](#rescheduling-late-flow-runs)
+* [get the last `N` completed flow runs from my workspace](#get-the-last-n-completed-flow-runs-from-my-workspace)
+
+The `PrefectClient` is an async context manager, so you can use it like this:
+
+```python
+from prefect import get_client
+
+async with get_client() as client:
+    response = await client.hello()
+    print(response.json()) # 👋
+
+```
+
+
+Examples
+---------------------------------------
+
+### Rescheduling late flow runs
+
+Sometimes, you may need to bulk reschedule flow runs that are late. For example, you may have accidentally scheduled many flow runs of a deployment to an inactive work pool.
+
+To do this, we can delete late flow runs and create new ones in a `Scheduled` state with a delay.
+
+This example reschedules the last 3 late flow runs of a deployment named `healthcheck-storage-test` to run 6 hours later than their original expected start time. It also deletes any remaining late flow runs of that deployment.
+
+```python
+import asyncio
+from datetime import datetime, timedelta, timezone
+from typing import Optional
+
+from prefect import get_client
+from prefect.client.schemas.filters import (
+    DeploymentFilter, FlowRunFilter
+)
+from prefect.client.schemas.objects import FlowRun
+from prefect.client.schemas.sorting import FlowRunSort
+from prefect.states import Scheduled
+
+async def reschedule_late_flow_runs(
+    deployment_name: str,
+    delay: timedelta,
+    most_recent_n: int,
+    delete_remaining: bool = True,
+    states: Optional[list[str]] = None
+) -> list[FlowRun]:
+    if not states:
+        states = ["Late"]
+
+    async with get_client() as client:
+        flow_runs = await client.read_flow_runs(
+            flow_run_filter=FlowRunFilter(
+                state=dict(name=dict(any_=states)),
+                expected_start_time=dict(
+                    before_=datetime.now(timezone.utc)
+                ),
+            ),
+            deployment_filter=DeploymentFilter(
+                name={'like_': deployment_name}
+            ),
+            sort=FlowRunSort.START_TIME_DESC,
+            limit=most_recent_n if not delete_remaining else None
+        )
+
+        if not flow_runs:
+            print(f"No flow runs found in states: {states!r}")
+            return []
+
+        rescheduled_flow_runs = []
+        for i, run in enumerate(flow_runs):
+            await client.delete_flow_run(flow_run_id=run.id)
+            if i < most_recent_n:
+                new_run = await client.create_flow_run_from_deployment(
+                    deployment_id=run.deployment_id,
+                    state=Scheduled(
+                        scheduled_time=run.expected_start_time + delay
+                    ),
+                )
+                rescheduled_flow_runs.append(new_run)
+
+        return rescheduled_flow_runs
+
+if __name__ == "__main__":
+    rescheduled_flow_runs = asyncio.run(
+        reschedule_late_flow_runs(
+            deployment_name="healthcheck-storage-test",
+            delay=timedelta(hours=6),
+            most_recent_n=3,
+        )
+    )
+
+    print(f"Rescheduled {len(rescheduled_flow_runs)} flow runs")
+
+    assert all(
+        run.state.is_scheduled() for run in rescheduled_flow_runs
+    )
+    assert all(
+        run.expected_start_time > datetime.now(timezone.utc)
+        for run in rescheduled_flow_runs
+    )
+
+```
+
+
+### Get the last `N` completed flow runs from my workspace
+
+To get the last `N` completed flow runs from our workspace, we can make use of `read_flow_runs` and `prefect.client.schemas`.
+
+This example gets the last three completed flow runs from our workspace:
+
+```python
+import asyncio
+from typing import Optional
+
+from prefect import get_client
+from prefect.client.schemas.filters import FlowRunFilter
+from prefect.client.schemas.objects import FlowRun
+from prefect.client.schemas.sorting import FlowRunSort
+
+async def get_most_recent_flow_runs(
+    n: int = 3,
+    states: Optional[list[str]] = None
+) -> list[FlowRun]:
+    if not states:
+        states = ["COMPLETED"]
+
+    async with get_client() as client:
+        return await client.read_flow_runs(
+            flow_run_filter=FlowRunFilter(
+                state={'type': {'any_': states}}
+            ),
+            sort=FlowRunSort.END_TIME_DESC,
+            limit=n,
+        )
+
+if __name__ == "__main__":
+    last_3_flow_runs: list[FlowRun] = asyncio.run(
+        get_most_recent_flow_runs()
+    )
+    print(last_3_flow_runs)
+
+    assert all(
+        run.state.is_completed() for run in last_3_flow_runs
+    )
+    assert (
+        end_times := [run.end_time for run in last_3_flow_runs]
+    ) == sorted(end_times, reverse=True)
+
+```
+
+
+Instead of the last three from the whole workspace, you could also use the `DeploymentFilter`, as in the previous example, to get the last three completed flow runs of a specific deployment.
+
+### Transition all running flows to cancelled via the Client
+
+It can be cumbersome to cancel many flow runs through the UI. You can use `get_client` to set multiple runs to a `Cancelled` state. The code below will cancel all flow runs that are in `Pending`, `Running`, `Scheduled`, or `Late` states when the script is run.
+
+```python
+import anyio
+
+from prefect import get_client
+from prefect.client.schemas.filters import FlowRunFilter, FlowRunFilterState, FlowRunFilterStateName
+from prefect.client.schemas.objects import StateType
+
+async def list_flow_runs_with_states(states: list[str]):
+    async with get_client() as client:
+        flow_runs = await client.read_flow_runs(
+            flow_run_filter=FlowRunFilter(
+                state=FlowRunFilterState(
+                    name=FlowRunFilterStateName(any_=states)
+                )
+            )
+        )
+    return flow_runs
+
+
+async def cancel_flow_runs(flow_runs):
+    async with get_client() as client:
+        for idx, flow_run in enumerate(flow_runs):
+            print(f"[{idx + 1}] Cancelling flow run '{flow_run.name}' with ID '{flow_run.id}'")
+            state_updates = {}
+            state_updates.setdefault("name", "Cancelled")
+            state_updates.setdefault("type", StateType.CANCELLED)
+            state = flow_run.state.copy(update=state_updates)
+            await client.set_flow_run_state(flow_run.id, state, force=True)
+
+
+async def bulk_cancel_flow_runs():
+    states = ["Pending", "Running", "Scheduled", "Late"]
+    flow_runs = await list_flow_runs_with_states(states)
+
+    while len(flow_runs) > 0:
+        print(f"Cancelling {len(flow_runs)} flow runs\n")
+        await cancel_flow_runs(flow_runs)
+        flow_runs = await list_flow_runs_with_states(states)
+    print("Done!")
+
+
+if __name__ == "__main__":
+    anyio.run(bulk_cancel_flow_runs)
+
+```
+
+**There are other ways to filter objects like flow runs**
+
+See [the filters API reference](https://docs.prefect.io/api-ref/prefect/client/schemas/#prefect.client.schemas.filters) for more ways to filter flow runs and other objects in your Prefect ecosystem.
+
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/development/profiles--settings.mdx b/docs/2.19.x/how-to-guides/development/profiles--settings.mdx
new file mode 100644
index 000000000000..3f2ef4d9e8d1
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/profiles--settings.mdx
@@ -0,0 +1,508 @@
+---
+title: Profiles & Configuration
+sidebarTitle: Profiles & Settings
+---
+
+Prefect's local settings are [documented](https://docs.prefect.io/2.19.1/api-ref/prefect/settings/#prefect.settings.Settings) and type-validated.
+
+By modifying the default settings, you can customize various aspects of the system. You can override a setting with an environment variable or by updating the setting in a Prefect [profile](#configuration-profiles).
+
+Prefect profiles are persisted groups of settings on your local machine. A single profile is always active.
+
+Initially, a default profile named `default` is active and contains no settings overrides.
+
+All currently active settings can be viewed from the command line by running the following command:
+
+```
+prefect config view --show-defaults
+
+```
+
+
+When you switch to a different profile, all of the settings configured in the newly activated profile are applied.
+
+Commonly configured settings
+-------------------------------------------------------------------------------
+
+This section describes some commonly configured settings. See [Configuring settings](#configuring-settings) for details on setting and unsetting configuration values.
+
+### PREFECT\_API\_KEY
+
+The `PREFECT_API_KEY` value specifies the [API key](https://docs.prefect.io/ui/cloud-api-keys/#create-an-api-key) used to authenticate with Prefect Cloud.
+
+```
+PREFECT_API_KEY="[API-KEY]"
+
+```
+
+
+Generally, you will set the `PREFECT_API_URL` and `PREFECT_API_KEY` for your active profile by running `prefect cloud login`. If you're curious, read more about [managing API keys](https://docs.prefect.io/cloud/users/api-keys/).
+
+### PREFECT\_API\_URL
+
+The `PREFECT_API_URL` value specifies the API endpoint of your Prefect Cloud workspace or a self-hosted Prefect server instance.
+
+For example, if using Prefect Cloud:
+
+```
+PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"
+
+```
+
+
+You can view your Account ID and Workspace ID in your browser URL when at a Prefect Cloud workspace page. For example: [https://app.prefect.cloud/account/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here](https://app.prefect.cloud/account/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here).
+
+If using a local Prefect server instance, set your API URL like this:
+
+```
+PREFECT_API_URL="http://127.0.0.1:4200/api"
+
+```
+
+
+### `PREFECT_API_URL` setting for workers
+
+If using a [worker](https://docs.prefect.io/concepts/work-pools/) (agent and block-based deployments are legacy) that can create flow runs for deployments in remote environments, [`PREFECT_API_URL`](https://docs.prefect.io/concepts/settings/) must be set for the environment in which your worker is running.
+
+If you want the worker to communicate with Prefect Cloud or a Prefect server instance from a remote execution environment such as a VM or Docker container, you must configure `PREFECT_API_URL` in that environment.
+
+
+
+### Running the Prefect UI behind a reverse proxy
+
+When using a reverse proxy (such as [Nginx](https://nginx.org/) or [Traefik](https://traefik.io/)) to proxy traffic to a locally-hosted Prefect UI instance, the Prefect server instance also needs to be configured to know how to connect to the API. The [`PREFECT_UI_API_URL`](https://docs.prefect.io/2.19.1/api-ref/prefect/settings/#PREFECT_UI_API_URL) should be set to the external proxy URL (e.g. if your external URL is [https://prefect-server.example.com/](https://prefect-server.example.com/) then set `PREFECT_UI_API_URL=https://prefect-server.example.com/api` for the Prefect server process). You can also accomplish this by setting [`PREFECT_API_URL`](https://docs.prefect.io/concepts/settings/#prefect.settings.PREFECT_API_URL) to the API URL, as this setting is used as a fallback if `PREFECT_UI_API_URL` is not set.
+
+
+### PREFECT\_HOME
+
+The `PREFECT_HOME` value specifies the local Prefect directory for configuration files, profiles, and the location of the default [Prefect SQLite database](https://docs.prefect.io/concepts/database/).
+
+```
+PREFECT_HOME='~/.prefect'
+
+```
+
+
+### PREFECT\_LOCAL\_STORAGE\_PATH
+
+The `PREFECT_LOCAL_STORAGE_PATH` value specifies the default location of local storage for flow runs.
+
+```
+PREFECT_LOCAL_STORAGE_PATH='${PREFECT_HOME}/storage'
+
+```
+
+
+### CSRF Protection Settings
+
+If using a local Prefect server instance, you can configure CSRF protection settings.
+
+`PREFECT_SERVER_CSRF_PROTECTION_ENABLED` - Activates CSRF protection on the server, requiring valid CSRF tokens for applicable requests. Recommended for production to prevent CSRF attacks. Defaults to False.
+
+```
+PREFECT_SERVER_CSRF_PROTECTION_ENABLED=True
+
+```
+
+
+`PREFECT_SERVER_CSRF_TOKEN_EXPIRATION` - Sets the expiration duration for server-issued CSRF tokens, influencing how often tokens need to be refreshed. The default is 1 hour.
+
+```
+PREFECT_SERVER_CSRF_TOKEN_EXPIRATION='3600' # 1 hour in seconds
+
+```
+
+
+By default, clients expect that CSRF protection is enabled on the server. If you are running a server without CSRF protection, you can disable CSRF support in the client.
+
+`PREFECT_CLIENT_CSRF_SUPPORT_ENABLED` - Enables or disables CSRF token handling in the Prefect client. When enabled, the client manages CSRF tokens for state-changing API requests. Defaults to True.
+
+```
+PREFECT_CLIENT_CSRF_SUPPORT_ENABLED=True
+
+```
+
+
+### Database settings
+
+If running a self-hosted Prefect server instance, there are several database configuration settings you can read about [here](https://docs.prefect.io/host/).
+
+### Logging settings
+
+Prefect provides several logging configuration settings that you can read about in the [logging docs](https://docs.prefect.io/concepts/logs/).
+
+Configuring settings
+---------------------------------------------------------------
+
+The `prefect config` CLI commands enable you to view, set, and unset settings.
+
+
+|Command|Description                             |
+|-------|----------------------------------------|
+|set    |Change the value for a setting.         |
+|unset  |Restore the default value for a setting.|
+|view   |Display the current settings.           |
+
+
+### Viewing settings from the CLI
+
+The `prefect config view` command will display settings that override default values.
+
+```
+$ prefect config view
+PREFECT_PROFILE="default"
+PREFECT_LOGGING_LEVEL='DEBUG'
+
+```
+
+
+You can show the sources of values with `--show-sources`:
+
+```
+$ prefect config view --show-sources
+PREFECT_PROFILE="default"
+PREFECT_LOGGING_LEVEL='DEBUG' (from env)
+
+```
+
+
+You can also include default values with `--show-defaults`:
+
+```
+$ prefect config view --show-defaults
+PREFECT_PROFILE='default'
+PREFECT_AGENT_PREFETCH_SECONDS='10' (from defaults)
+PREFECT_AGENT_QUERY_INTERVAL='5.0' (from defaults)
+PREFECT_API_KEY='None' (from defaults)
+PREFECT_API_REQUEST_TIMEOUT='60.0' (from defaults)
+PREFECT_API_URL='None' (from defaults)
+...
+
+```
+
+
+### Setting and clearing values
+
+The `prefect config set` command lets you change the value of a default setting.
+
+A commonly used example is setting the `PREFECT_API_URL`, which you may need to change when interacting with different Prefect server instances or Prefect Cloud.
+
+```
+# use a local Prefect server
+prefect config set PREFECT_API_URL="http://127.0.0.1:4200/api"
+
+# use Prefect Cloud
+prefect config set PREFECT_API_URL="https://api.prefect.cloud/api/accounts/[ACCOUNT-ID]/workspaces/[WORKSPACE-ID]"
+
+```
+
+
+If you want to configure a setting to use its default value, use the `prefect config unset` command.
+
+```
+prefect config unset PREFECT_API_URL
+
+```
+
+
+### Overriding defaults with environment variables
+
+All settings have keys that match the environment variables that can be used to override them.
+
+For example, configuring the home directory:
+
+```
+# environment variable
+export PREFECT_HOME="/path/to/home"
+
+```
+
+
+```
+# python
+import prefect.settings
+prefect.settings.PREFECT_HOME.value() # PosixPath('/path/to/home')
+
+```
+
+
+Configuring a server instance's port:
+
+```
+# environment variable
+export PREFECT_SERVER_API_PORT=4242
+
+```
+
+
+```
+# python
+prefect.settings.PREFECT_SERVER_API_PORT.value() # 4242
+
+```
+
+
+Configuration profiles
+-------------------------------------------------------------------
+
+Prefect allows you to persist settings instead of setting an environment variable each time you open a new shell. Settings are persisted to profiles, which allow you to move between groups of settings quickly.
+
+The `prefect profile` CLI commands enable you to create, review, and manage profiles.
+
+
+|Command|Description                                               |
+|-------|----------------------------------------------------------|
+|create |Create a new profile.                                     |
+|delete |Delete the given profile.                                 |
+|inspect|Display settings from a given profile; defaults to active.|
+|ls     |List profile names.                                       |
+|rename |Change the name of a profile.                             |
+|use    |Switch the active profile.                                |
+
+
+If you configured settings for a profile, `prefect profile inspect` displays those settings:
+
+```
+$ prefect profile inspect
+PREFECT_PROFILE = "default"
+PREFECT_API_KEY = "pnu_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+PREFECT_API_URL = "http://127.0.0.1:4200/api"
+
+```
+
+
+You can pass the name of a profile to view its settings:
+
+```
+$ prefect profile create test
+$ prefect profile inspect test
+PREFECT_PROFILE="test"
+
+```
+
+
+### Creating and removing profiles
+
+Create a new profile with no settings:
+
+```
+$ prefect profile create test
+Created profile 'test' at /Users/terry/.prefect/profiles.toml.
+
+```
+
+
+Create a new profile `foo` with settings cloned from an existing `default` profile:
+
+```
+$ prefect profile create foo --from default
+Created profile 'foo' matching 'default' at /Users/terry/.prefect/profiles.toml.
+
+```
+
+
+Rename a profile:
+
+```
+$ prefect profile rename temp test
+Renamed profile 'temp' to 'test'.
+
+```
+
+
+Remove a profile:
+
+```
+$ prefect profile delete test
+Removed profile 'test'.
+
+```
+
+
+Removing the default profile resets it:
+
+```
+$ prefect profile delete default
+Reset profile 'default'.
+
+```
+
+
+### Change values in profiles
+
+Set a value in the current profile:
+
+```
+$ prefect config set VAR=X
+Set variable 'VAR' to 'X'
+Updated profile 'default'
+
+```
+
+
+Set multiple values in the current profile:
+
+```
+$ prefect config set VAR2=Y VAR3=Z
+Set variable 'VAR2' to 'Y'
+Set variable 'VAR3' to 'Z'
+Updated profile 'default'
+
+```
+
+
+You can set a value in another profile by passing the `--profile NAME` option to a CLI command:
+
+```
+$ prefect --profile "foo" config set VAR=Y
+Set variable 'VAR' to 'Y'
+Updated profile 'foo'
+
+```
+
+
+Unset values in the current profile to restore the defaults:
+
+```
+$ prefect config unset VAR2 VAR3
+Unset variable 'VAR2'
+Unset variable 'VAR3'
+Updated profile 'default'
+
+```
+
+
+### Inspecting profiles
+
+See a list of available profiles:
+
+```
+$ prefect profile ls
+* default
+cloud
+test
+local
+
+```
+
+
+View all settings for a profile:
+
+```
+$ prefect profile inspect cloud
+PREFECT_API_URL='https://api.prefect.cloud/api/accounts/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx
+x/workspaces/xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+PREFECT_API_KEY='xxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+
+```
+
+
+### Using profiles
+
+The profile named `default` is used by default. There are several methods to switch to another profile.
+
+The recommended method is to use the `prefect profile use` command with the name of the profile:
+
+```
+$ prefect profile use foo
+Profile 'foo' now active.
+
+```
+
+
+Alternatively, you can set the environment variable `PREFECT_PROFILE` to the name of the profile:
+
+```
+export PREFECT_PROFILE=foo
+
+```
+
+
+Or, specify the profile in the CLI command for one-time usage:
+
+```
+prefect --profile "foo" ...
+
+```
+
+
+Note that this option must come before the subcommand. For example, to list flow runs using the profile `foo`:
+
+```
+prefect --profile "foo" flow-run ls
+
+```
+
+
+You may use the `-p` flag as well:
+
+```
+prefect -p "foo" flow-run ls
+
+```
+
+
+You may also create an 'alias' to automatically use your profile:
+
+```
+$ alias prefect-foo="prefect --profile 'foo' "
+# uses our profile!
+$ prefect-foo config view
+
+```
+
+
+Conflicts with environment variables
+-----------------------------------------------------------------------------------------------
+
+If setting the profile from the CLI with `--profile`, environment variables that conflict with settings in the profile will be ignored.
+
+In all other cases, environment variables will take precedence over the value in the profile.
+
+For example, a value set in a profile will be used by default:
+
+```
+$ prefect config set PREFECT_LOGGING_LEVEL="ERROR"
+$ prefect config view --show-sources
+PREFECT_PROFILE="default"
+PREFECT_LOGGING_LEVEL='ERROR' (from profile)
+
+```
+
+
+But, setting an environment variable will override the profile setting:
+
+```
+$ export PREFECT_LOGGING_LEVEL="DEBUG"
+$ prefect config view --show-sources
+PREFECT_PROFILE="default"
+PREFECT_LOGGING_LEVEL='DEBUG' (from env)
+
+```
+
+
+Unless the profile is explicitly requested when using the CLI:
+
+```
+$ prefect --profile default config view --show-sources
+PREFECT_PROFILE="default"
+PREFECT_LOGGING_LEVEL='ERROR' (from profile)
+
+```
+
+
+Profile files
+-------------------------------------------------
+
+Profiles are persisted to the file location specified by `PREFECT_PROFILES_PATH`. The default location is a `profiles.toml` file in the `PREFECT_HOME` directory:
+
+```
+$ prefect config view --show-defaults
+...
+PREFECT_PROFILES_PATH='${PREFECT_HOME}/profiles.toml'
+...
+
+```
+
+
+The [TOML](https://toml.io/en/) format is used to store profile data.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/development/recipes.mdx b/docs/2.19.x/how-to-guides/development/recipes.mdx
new file mode 100644
index 000000000000..e2fbc9b0c914
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/recipes.mdx
@@ -0,0 +1,103 @@
+---
+title: Recipes
+---
+
+Recipes are useful when you are looking for tutorials on how to deploy a worker, use event-driven flows, set up unit testing, and more.
+
+The following are Prefect recipes specific to Prefect 2. You can find a full repository of recipes at [https://github.com/PrefectHQ/prefect-recipes](https://github.com/PrefectHQ/prefect-recipes) and additional recipes at [Prefect Discourse](https://discourse.prefect.io/).
+
+Recipe catalog
+---------------------------------------------------
+
+### [Deploy a dlt pipeline on Prefect](https://dlthub.com/devel/walkthroughs/deploy-a-pipeline/deploy-with-prefect)
+
+dlt is an open-source Python library that enables the declarative loading of data sources into well-structured tables or datasets by automatically inferring and evolving schemas.
+
+Maintained by [dltHub](https://dlthub.com/)
+
+This recipe uses:
+
+![](https://docs.prefect.io/img/collections/dlthub.png)
+
+Contributing recipes
+---------------------------------------------------------------
+
+We're always looking for new recipe contributions! See the [Prefect Recipes](https://github.com/PrefectHQ/prefect-recipes#contributing--swag-) repository for details on how you can add your Prefect recipe, share best practices with fellow Prefect users, and earn some swag.
+
+[Prefect recipes](https://github.com/PrefectHQ/prefect-recipes) provide a vital cookbook where users can find helpful code examples and, when appropriate, common steps for specific Prefect use cases.
+
+We love recipes from anyone who has example code that another Prefect user can benefit from (e.g. a Prefect flow that loads data into Snowflake).
+
+Have a blog post, Discourse article, or tutorial you'd like to share as a recipe? All submissions are welcome. Clone the prefect-recipes repo, create a branch, add a link to your recipe to the README, and submit a PR. Have more questions? Read on.
+
+What is a recipe?
+--------------------------------------------------------
+
+A Prefect recipe is like a cookbook recipe: it tells you what you need — the ingredients — and some basic steps, but assumes you can put the pieces together. Think of the Hello Fresh meal experience, but for dataflows.
+
+A tutorial, on the other hand, is Julia Child holding your hand through the entire cooking process: explaining each ingredient and procedure, demonstrating best practices, pointing out potential problems, and generally making sure you can't stray from the happy path to a delicious meal.
+
+We love Julia, and we love tutorials. But we don't expect that a Prefect recipe should handhold users through every step and possible contingency of a solution. A recipe can start from an expectation of more expertise and problem-solving ability on the part of the reader.
+
+To see an example of a high-quality recipe, check out **[Serverless with AWS Chalice](https://github.com/PrefectHQ/prefect-recipes/tree/main/flows-advanced/serverless)**. This recipe includes all of the elements we like to see.
+
+Steps to add your recipe
+-----------------------------------------------------------------------
+
+Here's our guide to creating a recipe:
+
+```
+# Clone the repository
+git clone git@github.com:PrefectHQ/prefect-recipes.git
+cd prefect-recipes
+
+# Create and checkout a new branch
+git checkout -b new_recipe_branch_name
+
+```
+
+
+1. [Add your recipe](#what-are-the-common-ingredients-of-a-good-recipe). Your code may simply be a copy/paste of a single Python file or an entire folder. Unsure of where to add your file or folder? Just add it under the `flows-advanced/` folder. A Prefect Recipes maintainer will help you find the best place for your recipe. Just want to direct others to a project you made, whether it be a repo or a blog post? Simply link to it in the [Prefect Recipes README](https://github.com/PrefectHQ/prefect-recipes#readme)!
+2. (Optional) Write a [README](#what-are-some-tips-for-a-good-recipe-readme).
+3. Include a dependencies file, if applicable.
+4. Push your code and make a PR to the repository.
+
+That's it!
+
+What makes a good recipe?
+------------------------------------------------------------------------
+
+Every recipe is useful, as other Prefect users can adapt the recipe to their needs. Particularly good ones help a Prefect user bake a great dataflow solution! Take a look at the [prefect-recipes repo](https://github.com/PrefectHQ/prefect-recipes) to see some examples.
+
+What are the common ingredients of a good recipe?
+------------------------------------------------------------------------------------------------------------------------
+
+* Easy to understand: Can a user easily follow your recipe? Would a README or code comments help? A simple explanation providing context on how to use the example code is useful, but not required. A good README can set a recipe apart, so we have some additional suggestions for README files below.
+* Code and more: Sometimes a use case is best represented in Python code or shell scripts. Sometimes a configuration file is the most important artifact — think of a Dockerfile or Terraform file for configuring infrastructure.
+* All-inclusive: Share as much code as you can. Even boilerplate code like Dockerfiles or Terraform or Helm files are useful. Just _don't share company secrets or IP_.
+* Specific: Don't worry about generalizing your code, aside from removing anything internal/secret! Other users will extrapolate their own unique solutions from your example.
+
+What are some tips for a good recipe README?
+--------------------------------------------------------------------------------------------------------------
+
+A thoughtful README can take a recipe from good to great. Here are some best practices that we've found make for a great recipe README:
+
+* Provide a brief explanation of what your recipe demonstrates. This helps users determine quickly whether the recipe is relevant to their needs or answers their questions.
+* List which files are included and what each is meant to do. Each explanation need only be a few words.
+* Describe any dependencies and prerequisites (in addition to any dependencies you include in a requirements file). This includes both libraries or modules and any services your recipe depends on.
+* If steps are involved or there's an order to do things, a simple list of steps is helpful.
+* Bonus: troubleshooting steps you worked through along the way, or tips on where other users might get tripped up.
+
+Next steps
+-------------------------------------------
+
+We hope you'll feel comfortable sharing your Prefect solutions as recipes in the [prefect-recipes repo](https://github.com/PrefectHQ/prefect-recipes#contributions). Collaboration and knowledge sharing are defining attributes of our [Prefect Community](https://www.prefect.io/slack)!
+
+Have questions about sharing or using recipes? Reach out on our active [Prefect Slack Community](https://www.prefect.io/slack)!
+
+Happy engineering!
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/development/runtime-context.mdx b/docs/2.19.x/how-to-guides/development/runtime-context.mdx
new file mode 100644
index 000000000000..3cb65e5a497b
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/runtime-context.mdx
@@ -0,0 +1,97 @@
+---
+title: Get Information about the Runtime Context
+sidebarTitle: Runtime Context
+---
+
+Get Information about the Runtime Context
+---------------------------------------------------------------------------------------------------------
+
+Prefect tracks information about the current flow or task run with a run context. The run context can be thought of as a global variable that allows the Prefect engine to determine relationships between your runs, such as which flow your task was called from.
+
+The run context itself contains many internal objects used by Prefect to manage execution of your run and is only available in specific situations. For this reason, we expose a simple interface that only includes the items you care about and dynamically retrieves additional information when necessary. We call this the "runtime context" as it contains information that can be accessed only when a run is happening.
+
+**Mock values via environment variable**
+
+Oftentimes, you may want to mock certain values for testing purposes, for example, by manually setting an ID or a scheduled start time to ensure your code functions properly. Starting in version `2.10.3`, you can mock values at runtime via environment variables using the schema `PREFECT__RUNTIME__{SUBMODULE}__{KEY_NAME}=value`:
+
+```
+$ export PREFECT__RUNTIME__TASK_RUN__FAKE_KEY='foo'
+$ python -c 'from prefect.runtime import task_run; print(task_run.fake_key)' # "foo"
+
+```
+
+
+If the environment variable mocks an existing runtime attribute, the value is cast to the same type. This works for runtime attributes of basic types (`bool`, `int`, `float`, and `str`) and `pendulum.DateTime`. For complex types like `list` or `dict`, we suggest mocking them using [monkeypatch](https://docs.pytest.org/en/latest/how-to/monkeypatch.html) or a similar tool.
+
+
+Accessing runtime information
+---------------------------------------------------------------------------------
+
+The `prefect.runtime` module is the home for all runtime context access. Each major runtime concept has its own submodule:
+
+* `deployment`: Access information about the deployment for the current run
+* `flow_run`: Access information about the current flow run
+* `task_run`: Access information about the current task run
+
+For example:
+
+
+```python my_runtime_info.py
+from prefect import flow, task
+from prefect import runtime
+
+@flow(log_prints=True)
+def my_flow(x):
+    print("My name is", runtime.flow_run.name)
+    print("I belong to deployment", runtime.deployment.name)
+    my_task(2)
+
+@task
+def my_task(y):
+    print("My name is", runtime.task_run.name)
+    print("Flow run parameters:", runtime.flow_run.parameters)
+
+my_flow(1)
+
+```
+
+
+Running this file will produce output similar to the following:
+
+```
+10:08:02.948 | INFO    | prefect.engine - Created flow run 'solid-gibbon' for flow 'my-flow'
+10:08:03.555 | INFO    | Flow run 'solid-gibbon' - My name is solid-gibbon
+10:08:03.558 | INFO    | Flow run 'solid-gibbon' - I belong to deployment None
+10:08:03.703 | INFO    | Flow run 'solid-gibbon' - Created task run 'my_task-0' for task 'my_task'
+10:08:03.704 | INFO    | Flow run 'solid-gibbon' - Executing 'my_task-0' immediately...
+10:08:04.006 | INFO    | Task run 'my_task-0' - My name is my_task-0
+10:08:04.007 | INFO    | Task run 'my_task-0' - Flow run parameters: {'x': 1}
+10:08:04.105 | INFO    | Task run 'my_task-0' - Finished in state Completed()
+10:08:04.968 | INFO    | Flow run 'solid-gibbon' - Finished in state Completed('All states completed.')
+
+```
+
+
+Above, we demonstrated access to information about the current flow run, task run, and deployment. When run without a deployment (via `python my_runtime_info.py`), you should see `"I belong to deployment None"` logged. When information is not available, the runtime will always return an empty value. Because this flow was run outside of a deployment, there is no deployment data. If this flow was run as part of a deployment, we'd see the name of the deployment instead.
+
+See the [runtime API reference](https://docs.prefect.io/api-ref/prefect/runtime/flow_run/) for a full list of available attributes.
+
+Accessing the run context directly
+-------------------------------------------------------------------------------------------
+
+The current run context can be accessed with `prefect.context.get_run_context()`. This function will raise an exception if no run context is available, meaning you are not in a flow or task run. If a task run context is available, it will be returned even if a flow run context is available.
+
+Alternatively, you can access the flow run or task run context explicitly. This will, for example, allow you to access the flow run context from a task run.
+
+Note that we do not send the flow run context to distributed task workers because the context is costly to serialize and deserialize.
+
+```python
+from prefect.context import FlowRunContext, TaskRunContext
+
+flow_run_ctx = FlowRunContext.get()
+task_run_ctx = TaskRunContext.get()
+
+```
+
+
+Unlike `get_run_context`, these method calls will not raise an error if the context is not available. Instead, they will return `None`.
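+
+For example, here's a minimal sketch (our own illustration, not from the API reference) of guarding against a missing context with that `None` return value:
+
+```python
+from prefect.context import FlowRunContext
+
+
+def describe_flow_run() -> str:
+    # FlowRunContext.get() returns None (rather than raising) when no
+    # flow run context is available, e.g. when called from a plain script.
+    flow_run_ctx = FlowRunContext.get()
+    if flow_run_ctx is None:
+        return "not inside a flow run"
+    return f"inside flow run {flow_run_ctx.flow_run.name}"
+
+```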
diff --git a/docs/2.19.x/how-to-guides/development/specify-upstream-dependencies.mdx b/docs/2.19.x/how-to-guides/development/specify-upstream-dependencies.mdx
new file mode 100644
index 000000000000..f4b699c18046
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/specify-upstream-dependencies.mdx
@@ -0,0 +1,254 @@
+---
+title: Specifying Upstream Dependencies
+---
+
+Results from a task can be provided to other tasks (or subflows) as upstream dependencies. Prefect uses upstream dependencies in two ways:
+
+1. To populate dependency arrows in the flow run graph
+2. To determine execution order for [concurrently submitted](https://docs.prefect.io/concepts/task-runners) units of work that depend on each other
+
+
+**Tasks vs. other functions**
+
+**Only results from tasks** inform Prefect's ability to determine dependencies. Return values from functions without task decorators, including subflows, do not carry the same information about their origin as task results.
+
+When using non-sequential task runners such as the [`ConcurrentTaskRunner`](https://docs.prefect.io/api-ref/prefect/task-runners/#prefect.task_runners.ConcurrentTaskRunner) or [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/), the order of execution for submitted tasks is not guaranteed unless their dependencies are specified.
+
+For example, compare how tasks submitted to the `ConcurrentTaskRunner` behave with and without upstream dependencies:
+
+```python
+from prefect import flow, task
+
+@flow(log_prints=True)  # Default task runner is ConcurrentTaskRunner
+def flow_of_tasks():
+    # no dependencies, so execution order is not guaranteed
+    first.submit()
+    second.submit()
+    third.submit()
+
+@task
+def first():
+    print("I'm first!")
+
+@task
+def second():
+    print("I'm second!")
+
+@task
+def third():
+    print("I'm third!")
+
+```
+
+
+```
+Flow run 'pumpkin-puffin' - Created task run 'first-0' for task 'first'
+Flow run 'pumpkin-puffin' - Submitted task run 'first-0' for execution.
+Flow run 'pumpkin-puffin' - Created task run 'second-0' for task 'second'
+Flow run 'pumpkin-puffin' - Submitted task run 'second-0' for execution.
+Flow run 'pumpkin-puffin' - Created task run 'third-0' for task 'third'
+Flow run 'pumpkin-puffin' - Submitted task run 'third-0' for execution.
+Task run 'third-0' - I'm third!
+Task run 'first-0' - I'm first!
+Task run 'second-0' - I'm second!
+Task run 'second-0' - Finished in state Completed()
+Task run 'third-0' - Finished in state Completed()
+Task run 'first-0' - Finished in state Completed()
+Flow run 'pumpkin-puffin' - Finished in state Completed('All states completed.')
+```
+
+
+```python
+from prefect import flow, task
+
+@flow(log_prints=True)  # Default task runner is ConcurrentTaskRunner
+def flow_of_tasks():
+    # with dependencies, tasks execute in order
+    first_result = first.submit()
+    second_result = second.submit(first_result)
+    third.submit(second_result)
+
+@task
+def first():
+    print("I'm first!")
+
+@task
+def second(input):
+    print("I'm second!")
+
+@task
+def third(input):
+    print("I'm third!")
+```
+
+
+```
+Flow run 'statuesque-waxbill' - Created task run 'first-0' for task 'first'
+Flow run 'statuesque-waxbill' - Submitted task run 'first-0' for execution.
+Flow run 'statuesque-waxbill' - Created task run 'second-0' for task 'second'
+Flow run 'statuesque-waxbill' - Submitted task run 'second-0' for execution.
+Flow run 'statuesque-waxbill' - Created task run 'third-0' for task 'third'
+Flow run 'statuesque-waxbill' - Submitted task run 'third-0' for execution.
+Task run 'first-0' - I'm first!
+Task run 'first-0' - Finished in state Completed()
+Task run 'second-0' - I'm second!
+Task run 'second-0' - Finished in state Completed()
+Task run 'third-0' - I'm third!
+Task run 'third-0' - Finished in state Completed()
+Flow run 'statuesque-waxbill' - Finished in state Completed('All states completed.')
+```
+
+
+Determination methods
+-----------------------------------------------------------------
+
+A task or subflow's upstream dependencies can be inferred automatically via its inputs, or stated explicitly via the `wait_for` parameter.
+
+### Automatic
+
+When a result from a task is used as input for another task, Prefect automatically recognizes the task that result originated from as an upstream dependency.
+
+This applies to every way you can run tasks with Prefect, whether you're calling the task function directly, calling [`.submit()`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.submit), or calling [`.map()`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.map). Subflows similarly recognize task results as upstream dependencies.
+
+```python
+from prefect import flow, task
+
+
+@flow(log_prints=True)
+def flow_of_tasks():
+    upstream_result = upstream.submit()
+    downstream_1_result = downstream_1.submit(upstream_result)
+    downstream_2_result = downstream_2.submit(upstream_result)
+    mapped_task_results = mapped_task.map([downstream_1_result, downstream_2_result])
+    final_task(mapped_task_results)
+
+@task
+def upstream():
+    return "Hello from upstream!"
+
+@task
+def downstream_1(input):
+    return input
+
+@task
+def downstream_2(input):
+    return input
+
+@task
+def mapped_task(input):
+    return input
+
+@task
+def final_task(input):
+    print(input)
+
+```
+
+
+![Flow run graph for automatic task dependencies](/images/upstream1.png)
+
+
+
+### Manual
+
+Tasks that do not share data can be informed of their upstream dependencies through the `wait_for` parameter. Just as with automatic dependencies, this applies to direct task function calls, [`.submit()`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.submit), [`.map()`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.map), and subflows.
+
+
+**Differences with `.map()`**
+
+Manually defined upstream dependencies apply to all tasks submitted by `.map()`, so each mapped task must wait for _all_ upstream dependencies passed into `wait_for` to finish. This is distinct from automatic dependencies for mapped tasks, where each mapped task must only wait for the upstream tasks whose results it depends on.
+
+
+```python
+from prefect import flow, task
+
+
+@flow(log_prints=True)
+def flow_of_tasks():
+    upstream_result = upstream.submit()
+    downstream_1_result = downstream_1.submit(wait_for=[upstream_result])
+    downstream_2_result = downstream_2.submit(wait_for=[upstream_result])
+    mapped_task_results = mapped_task.map([1, 2], wait_for=[downstream_1_result, downstream_2_result])
+    final_task(wait_for=mapped_task_results)
+
+@task
+def upstream():
+    pass
+
+@task
+def downstream_1():
+    pass
+
+@task
+def downstream_2():
+    pass
+
+@task
+def mapped_task(input):
+    pass
+
+@task
+def final_task():
+    pass
+
+```
+
+
+![Flow run graph for manual task dependencies](/images/upstream2.png)
+
+
+Deployments as dependencies
+-----------------------------------------------------------------------------
+
+For more complex workflows, parts of your logic may require additional resources, different infrastructure, or independent parallel execution. 
A typical approach for addressing these needs is to execute that logic as separate [deployment](https://docs.prefect.io/concepts/deployments) runs from within a flow. + +Composing deployment runs into a flow so that they can be treated as upstream dependencies is as simple as calling [`run_deployment`](https://docs.prefect.io/api-ref/prefect/deployments/deployments/#prefect.deployments.deployments.run_deployment) from within a task. + +Given a deployment `process-user` of flow `parallel-work`, a flow of deployments might look like this: + +```python +from prefect import flow, task +from prefect.deployments import run_deployment + + +@flow +def flow_of_deployments(): + deployment_run_1 = run_deployment_task.submit( + flow_name="parallel-work", + deployment_name="process-user", + parameters={"user_id": 1}, + ) + deployment_run_2 = run_deployment_task.submit( + flow_name="parallel-work", + deployment_name="process-user", + parameters={"user_id": 2}, + ) + downstream_task(wait_for=[deployment_run_1, deployment_run_2]) + + +@task(task_run_name="Run deployment {flow_name}/{deployment_name}") +def run_deployment_task( + flow_name: str, + deployment_name: str, + parameters: dict +): + run_deployment( + name=f"{flow_name}/{deployment_name}", + parameters=parameters + ) + + +@task +def downstream_task(): + print("I'm downstream!") + +``` + + +By default, deployments started from `run_deployment` will also appear as subflows for tracking purposes. This behavior can be disabled by setting the `as_subflow` parameter for `run_deployment` to `False`. + +![Flow of deployments](/images/upstream3.png) + diff --git a/docs/2.19.x/how-to-guides/development/testing.mdx b/docs/2.19.x/how-to-guides/development/testing.mdx new file mode 100644 index 000000000000..de4887a1d177 --- /dev/null +++ b/docs/2.19.x/how-to-guides/development/testing.mdx @@ -0,0 +1,90 @@ +--- +title: Testing +description: Once you have some awesome flows, you probably want to test them! +--- + + +Unit testing flows +----------------------------------------------------------- + +Prefect provides a simple context manager for unit tests that allows you to run flows and tasks against a temporary local SQLite database. + +```python +from prefect import flow +from prefect.testing.utilities import prefect_test_harness + +@flow +def my_favorite_flow(): + return 42 + +def test_my_favorite_flow(): + with prefect_test_harness(): + # run the flow against a temporary testing database + assert my_favorite_flow() == 42 + +``` + + +For more extensive testing, you can leverage `prefect_test_harness` as a fixture in your unit testing framework. For example, when using `pytest`: + +```python +from prefect import flow +import pytest +from prefect.testing.utilities import prefect_test_harness + +@pytest.fixture(autouse=True, scope="session") +def prefect_test_fixture(): + with prefect_test_harness(): + yield + +@flow +def my_favorite_flow(): + return 42 + +def test_my_favorite_flow(): + assert my_favorite_flow() == 42 + +``` + + +**Note** + +In this example, the fixture is scoped to run once for the entire test session. In most cases, you will not need a clean database for each test and just want to isolate your test runs to a test database. Creating a new test database per test creates significant overhead, so we recommend scoping the fixture to the session. If you need to isolate some tests fully, you can use the test harness again to create a fresh database. 
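+
+For example, here's a minimal sketch (our own illustration, not from the Prefect docs) of a function-scoped fixture for those fully isolated tests:
+
+```python
+from prefect import flow
+import pytest
+from prefect.testing.utilities import prefect_test_harness
+
+@pytest.fixture
+def clean_prefect_db():
+    # Function-scoped: each test that requests this fixture gets its own
+    # temporary database. Slower than a session-scoped fixture, so reserve
+    # it for tests that truly need full isolation.
+    with prefect_test_harness():
+        yield
+
+@flow
+def my_favorite_flow():
+    return 42
+
+def test_with_a_fresh_database(clean_prefect_db):
+    assert my_favorite_flow() == 42
+
+```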
+
+
+Unit testing tasks
+-----------------------------------------------------------
+
+To test an individual task, you can access the original function using `.fn`:
+
+```python
+from prefect import flow, task
+
+@task
+def my_favorite_task():
+    return 42
+
+@flow
+def my_favorite_flow():
+    val = my_favorite_task()
+    return val
+
+def test_my_favorite_task():
+    assert my_favorite_task.fn() == 42
+
+```
+
+
+**Disable logger**
+
+If your task uses a logger, you can disable the logger in order to avoid the `RuntimeError` raised from a missing flow context.
+
+```python
+from prefect.logging import disable_run_logger
+
+def test_my_favorite_task():
+    with disable_run_logger():
+        assert my_favorite_task.fn() == 42
+
+```
diff --git a/docs/2.19.x/how-to-guides/development/third-party-secrets.mdx b/docs/2.19.x/how-to-guides/development/third-party-secrets.mdx
new file mode 100644
index 000000000000..30c66e79f705
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/third-party-secrets.mdx
@@ -0,0 +1,173 @@
+---
+title: "Third-party Secrets: Connect to services without storing credentials in blocks"
+sidebarTitle: Third Party Secrets
+---
+
+Credentials blocks and secret blocks are popular ways to store and retrieve sensitive information for connecting to third-party services.
+
+In Prefect Cloud, these block values are stored in encrypted format. Organizations whose security policies make such storage infeasible can still use Prefect to connect to third-party services securely.
+
+Any sensitive information that is not stored in a block can be read from the environment.
+
+For example, to find AWS credentials for authentication, any attributes not provided to an AWS Credentials block are sourced at runtime in the order shown in the [Boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials). `prefect-aws` creates the session object using the values in the block; any missing values are then resolved following the sequence in the Boto3 docs.
+
+`prefect-gcp` and `prefect-azure` follow similar patterns.
+
+In the example below, we interact with a Snowflake database using credentials stored in AWS Secrets Manager. This example can be generalized to other third party services that require credentials.
+
+Prerequisites
+-------------------------------------------------
+
+1. Prefect [installed](https://docs.prefect.io/getting-started/installation).
+2. CLI authenticated to your [Prefect Cloud](https://app.prefect.cloud/) account.
+3. [Snowflake account](https://www.snowflake.com/).
+4. [AWS account](https://aws.amazon.com/).
+
+Steps
+---------------------------------
+
+1. Install `prefect-aws` and `prefect-snowflake` integration libraries.
+2. Store Snowflake password in AWS Secrets Manager.
+3. Create `AwsSecret` block to access the Snowflake password.
+4. Create `AwsCredentials` block for authentication.
+5. Ensure the compute environment has access to AWS credentials that are authorized to access the secret in AWS.
+6. Create and use `SnowflakeCredentials` and `SnowflakeConnector` blocks in Python code to interact with Snowflake.
+
+### Install `prefect-aws` and `prefect-snowflake` libraries
+
+The following command will install and upgrade the necessary libraries and their dependencies.
+
+```
+pip install -U prefect-aws prefect-snowflake
+
+```
+
+
+### Store Snowflake password in AWS Secrets Manager
+
+Go to the [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) console and create a new secret. Alternatively, create a secret using the AWS CLI or a script, as sketched below.
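+
+For instance, a minimal sketch using the AWS CLI (the secret name and key `my-snowflake-pw` are assumptions, chosen to match the block example later in this guide):
+
+```
+aws secretsmanager create-secret \
+    --name my-snowflake-pw \
+    --secret-string '{"my-snowflake-pw": "YOUR-SNOWFLAKE-PASSWORD"}'
+
+```
+
+
+To create the secret in the console instead: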
1. Choose **Store a new secret**.
2. Select **Other type of secret**.
3. Input the key-value pair for your Snowflake password where the key is any string and the value is your Snowflake password.
4. Copy the key for future reference and click **Next**.
5. Enter a name for your secret, copy the name, and click **Next**.
6. For this demo, we won't rotate the key, so click **Next**.
7. Click **Store**.

### Create `AwsSecret` block to access your Snowflake password

You can create blocks with Python code or via the Prefect UI. Block creation through the UI can help you visualize how the pieces fit together, so let's use it here.

On the Blocks page, click on **+** to add a new block and select **AWS Secret** from the list of block types. Enter a name for your block and enter the secret name from AWS Secrets Manager.

Note that if you're using a self-hosted Prefect server instance, you'll need to register the block types in the newly installed modules before creating blocks.

```
prefect block register -m prefect_aws && prefect block register -m prefect_snowflake

```


### Create `AwsCredentials` block

Under the hood, Prefect uses the AWS `boto3` client to create a session.

In the **AwsCredentials** section of the form, click **Add +** and create an AWS Credentials block by entering the necessary values.

Values for **Access Key ID** and **Secret Access Key** will be read from the compute environment. My AWS **Access Key ID** and **Secret Access Key** values with permissions to read the AWS Secret are stored locally in my `~/.aws/credentials` file, so I'll leave those fields blank. You could enter those values at block creation, but then they would be saved to the database, and that's what we're trying to avoid. By leaving those attributes blank, Prefect knows to look to the compute environment. If the compute environment contains the necessary credentials, Prefect will use them to authenticate in the order shown in the [Boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html#configuring-credentials).

The same order is followed to resolve the AWS region. Let's specify the region in our `AwsCredentials` block so that our connection works regardless of the contents of our local AWS config file or whether we run our code on AWS compute located in another region than our secret.

Click **Create** to save the blocks.

### Ensure the compute environment has access to AWS credentials

Ensure the compute environment contains AWS credentials with authorization to access AWS Secrets Manager. When we connect to Snowflake, Prefect will automatically use these credentials to authenticate and access the AWS secret that contains the Snowflake password.

### Create and use `SnowflakeCredentials` and `SnowflakeConnector` blocks in Python code

Let's use Prefect's blocks for convenient access to Snowflake. We won't save the blocks, to ensure the credentials are not stored in Prefect Cloud.

We'll create a flow that connects to Snowflake and calls two tasks. The first task creates a table and inserts some data. The second task reads the data out.
```python
import json
from prefect import flow, task
from prefect_aws import AwsSecret
from prefect_snowflake import SnowflakeConnector, SnowflakeCredentials


@task
def setup_table(snow_connector: SnowflakeConnector) -> None:
    with snow_connector as connector:
        connector.execute(
            "CREATE TABLE IF NOT EXISTS customers (name varchar, address varchar);"
        )
        connector.execute_many(
            "INSERT INTO customers (name, address) VALUES (%(name)s, %(address)s);",
            seq_of_parameters=[
                {"name": "Ford", "address": "Highway 42"},
                {"name": "Unknown", "address": "Space"},
                {"name": "Me", "address": "Myway 88"},
            ],
        )


@task
def fetch_data(snow_connector: SnowflakeConnector) -> list:
    all_rows = []
    with snow_connector as connector:
        while True:
            new_rows = connector.fetch_many("SELECT * FROM customers", size=2)
            if len(new_rows) == 0:
                break
            all_rows.append(new_rows)
    return all_rows


@flow(log_prints=True)
def snowflake_flow():
    aws_secret_block = AwsSecret.load("my-snowflake-pw")

    snow_connector = SnowflakeConnector(
        schema="MY_SCHEMA",
        database="MY_DATABASE",
        warehouse="COMPUTE_WH",
        fetch_size=1,
        credentials=SnowflakeCredentials(
            role="MYROLE",
            user="MYUSERNAME",
            account="ab12345.us-east-2.aws",
            password=json.loads(aws_secret_block.read_secret()).get("my-snowflake-pw"),
        ),
        poll_frequency_s=1,
    )

    setup_table(snow_connector)
    all_rows = fetch_data(snow_connector)
    print(all_rows)


if __name__ == "__main__":
    snowflake_flow()

```


Fill in the relevant details for your Snowflake account and run the script.

Note that the flow reads the Snowflake password from AWS Secrets Manager and uses it in the `SnowflakeCredentials` block. The `SnowflakeConnector` block uses the nested `SnowflakeCredentials` block to connect to Snowflake. Again, neither of the Snowflake blocks is saved, so the credentials are not stored in Prefect Cloud.

Check out the [`prefect-snowflake` docs](https://docs.prefect.io/integrations/prefect-snowflake) for more examples of working with Snowflake.

Next steps
-------------------------------------------

Now you can turn your flow into a [deployment](https://docs.prefect.io/guides/prefect-deploy/) so that you and your team can run it remotely on a schedule, in response to an event, or manually.

Make sure to specify the `prefect-aws` and `prefect-snowflake` dependencies in your work pool or deployment so that they are available at runtime.

Also ensure your compute has the AWS credentials for accessing the secret in AWS Secrets Manager.

You've seen how to use Prefect blocks to store non-sensitive configuration and fetch sensitive configuration values from the environment. You can use this pattern to connect to other third-party services that require credentials, such as databases and APIs. You can use a similar pattern with any secret manager, or extend it to work with environment variables.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/development/variables.mdx b/docs/2.19.x/how-to-guides/development/variables.mdx
new file mode 100644
index 000000000000..ce1bd509570f
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/variables.mdx
---
title: Variables
---

Variables enable you to store and reuse non-sensitive bits of data, such as configuration information. Variables are named, mutable string values, much like environment variables, and are scoped to a Prefect server instance or a single workspace in Prefect Cloud.
Variables can be created or modified at any time, but are intended for values with infrequent writes and frequent reads. Variable values may be cached for quicker retrieval.

While variable values are most commonly loaded during flow runtime, they can be loaded in other contexts at any time, including to pass configuration information to Prefect configuration files, such as deployment steps.


**Variables are not Encrypted**

Using variables to store sensitive information, such as credentials, is not recommended. Instead, use [Secret blocks](https://docs.prefect.io/concepts/blocks/#prefect-built-in-blocks) to store and access sensitive information.


Managing variables
-----------------------------------------------------------

You can create, read, edit, and delete variables via the Prefect UI, API, and CLI. Names must adhere to traditional variable naming conventions:

* Have no more than 255 characters.
* Only contain lowercase alphanumeric characters (\[a-z\], \[0-9\]) or underscores (\_). Spaces are not allowed.
* Be unique.

Values must:

* Have 5000 characters or fewer.

Optionally, you can add tags to the variable.

### Via the Prefect UI

You can see all the variables in your Prefect server instance or Prefect Cloud workspace on the **Variables** page of the Prefect UI. Both the name and value of all variables are visible to anyone with access to the server or workspace.

To create a new variable, select the **+** button next to the header of the **Variables** page. Enter the name and value of the variable.

![variables-ui](/images/variables1.png)

### Via the REST API

Variables can be created and deleted via the REST API. You can also set and get variables via the API with either the variable name or ID. See the [REST reference](https://app.prefect.cloud/api/docs#tag/Variables) for more information.

### Via the CLI

You can list, inspect, and delete variables via the command line interface with the `prefect variable ls`, `prefect variable inspect <name>`, and `prefect variable delete <name>` commands, respectively.

Accessing variables
-------------------------------------------------------------

In addition to the UI and API, variables can be referenced in code and in certain Prefect configuration files.

### In Python code

You can access any variable with the `Variable.get()` method in the Python SDK. If you attempt to reference a variable that does not exist, the method will return `None`. You can create variables via the Python SDK with the `Variable.set()` method. Note that if a variable of the same name exists, you'll need to pass `overwrite=True`.
```python
from prefect.variables import Variable

# setting the variable
variable = Variable.set(name="the_answer", value="42")

# getting from a synchronous context
answer = Variable.get('the_answer')
print(answer.value)
# 42

# getting from an asynchronous context
answer = await Variable.get('the_answer')
print(answer.value)
# 42

# getting a variable that doesn't exist returns None,
# so don't call .value on the result
answer = Variable.get('not_the_answer')
print(answer)
# None

# getting with a default value; the default is returned as given
answer = Variable.get('not_the_answer', default='42')
print(answer)
# 42

# using `overwrite=True`
answer = Variable.get('the_answer')
print(answer.value)
# 42
answer = Variable.set(name="the_answer", value="43", overwrite=True)
print(answer.value)
# 43

```


### In `prefect.yaml` deployment steps

In `.yaml` files, variables are denoted by quotes and double curly brackets, like so: `"{{ prefect.variables.my_variable }}"`. You can use variables to templatize deployment steps by referencing them in the `prefect.yaml` file used to create deployments. For example, you could pass a variable in to specify a branch for a git repo in a deployment `pull` step:

```
pull:
- prefect.deployments.steps.git_clone:
    repository: https://github.com/PrefectHQ/hello-projects.git
    branch: "{{ prefect.variables.deployment_branch }}"

```


The `deployment_branch` variable will be evaluated at runtime for the deployed flow, allowing changes to be made to variables used in a pull action without updating a deployment directly.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/development/webhooks.mdx b/docs/2.19.x/how-to-guides/development/webhooks.mdx
new file mode 100644
index 000000000000..c499fb6c2277
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/development/webhooks.mdx
---
title: Webhooks
---
Use webhooks in your Prefect Cloud workspace to receive, observe, and react to events from other systems in your ecosystem. Each webhook exposes a unique URL endpoint to receive events from other systems and transforms them into Prefect [events](https://docs.prefect.io/cloud/events/) for use in [automations](https://docs.prefect.io/cloud/automations/).

Webhooks are defined by two essential components: a unique URL and a template that translates incoming web requests to a Prefect event.

Configuring webhooks
---------------------------------------------------------------

### Via the Prefect Cloud API

Webhooks are managed via the [Webhooks API endpoints](https://app.prefect.cloud/api/docs#tag/Webhooks). This is a Prefect Cloud-only feature. You authenticate API calls using the standard [authentication methods you use with Prefect Cloud](https://docs.prefect.io/cloud/connecting#manually-configure-prefect-api-settings).

### Via Prefect Cloud

Webhooks can be created and managed from the Prefect Cloud UI.

![Managing a webhook in the Prefect Cloud UI.](/images/webhooks1.png)

### Via the Prefect CLI

Webhooks can be managed and interacted with via the `prefect cloud webhook` command group.
```
prefect cloud webhook --help

```


You can create your first webhook by invoking `create`:

```
prefect cloud webhook create your-webhook-name \
    --description "Receives webhooks from your system" \
    --template '{ "event": "your.event.name", "resource": { "prefect.resource.id": "your.resource.id" } }'

```


Note the template string, which is discussed in [greater detail below](#webhook-templates).

You can retrieve details for a specific webhook by ID using `get`, or optionally query all webhooks in your workspace via `ls`:

```
# get webhook by ID
prefect cloud webhook get <webhook-id>

# list all configured webhooks in your workspace
prefect cloud webhook ls

```


If you need to disable an existing webhook without deleting it, use `toggle`:

```
prefect cloud webhook toggle <webhook-id>
Webhook is now disabled

prefect cloud webhook toggle <webhook-id>
Webhook is now enabled

```


If you are concerned that your webhook endpoint may have been compromised, use `rotate` to generate a new, random endpoint:

```
prefect cloud webhook rotate <webhook-id>

```


Webhook endpoints
---------------------------------------------------------

The webhook endpoints have randomly generated opaque URLs that do not divulge any information about your Prefect Cloud workspace. They are rooted at `https://api.prefect.cloud/hooks/`. For example: `https://api.prefect.cloud/hooks/AERylZ_uewzpDx-8fcweHQ`. Prefect Cloud assigns this URL when you create a webhook; it cannot be set via the API. You may rotate your webhook URL at any time without losing the associated configuration.

All webhooks may accept requests via the most common HTTP methods:

* `GET`, `HEAD`, and `DELETE` may be used for webhooks that define a static event template, or a template that does not depend on the _body_ of the HTTP request. The headers of the request will be available for templates.
* `POST`, `PUT`, and `PATCH` may be used when the webhook request will include a body. See [How HTTP request components are handled](#how-http-request-components-are-handled) for more details on how the body is parsed.

Prefect Cloud webhooks are deliberately quiet to the outside world, returning only a `204 No Content` response when they are successful and a `400 Bad Request` error when there is any error interpreting the request. For more visibility when your webhooks fail, see the [Troubleshooting](#troubleshooting) section below.

Webhook templates
---------------------------------------------------------

The purpose of a webhook is to accept an HTTP request from another system and produce a Prefect event from it. You may find that you often have little influence or control over the format of those requests, so Prefect's webhook system gives you full control over how you turn those notifications from other systems into meaningful events in your Prefect Cloud workspace. The template you define for each webhook will determine how individual components of the incoming HTTP request become the event name and resource labels of the resulting Prefect event.

As with the [templates available in Prefect Cloud Automation](https://docs.prefect.io/cloud/automations) for defining notifications and other parameters, you will write templates in [Jinja2](https://jinja.palletsprojects.com/en/3.1.x/templates/). All of the built-in Jinja2 blocks and filters are available, as well as the filters from the [`jinja2-humanize-extensions` package](https://pypi.org/project/jinja2-humanize-extension/).
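For instance, built-in filters can normalize values taken from the incoming request before they are used in the event. A sketch, where the `body.action` and `body.model` fields are hypothetical values a sender might post:

```
{
    "event": "model.{{ body.action | lower | trim }}",
    "resource": {
        "prefect.resource.id": "product.models.{{ body.model | trim }}"
    }
}

```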
Your goal when defining your event template is to produce a valid JSON object that defines (at minimum) the `event` name and the `resource["prefect.resource.id"]`, which are required of all events. The simplest template is one in which these are statically defined.

### Static webhook events

Let's see a static webhook template example. Say you want to configure a webhook that will notify Prefect when your `recommendations` machine learning model has been updated, so you can then send a Slack notification to your team and run a few subsequent deployments. Those models are produced on a daily schedule by another team that is using `cron` for scheduling. They aren't able to use Prefect for their flows (yet!), but they are happy to add a `curl` to the end of their daily script to notify you. Because this webhook will only be used for a single event from a single resource, your template can be entirely static:

```
{
    "event": "model.refreshed",
    "resource": {
        "prefect.resource.id": "product.models.recommendations",
        "prefect.resource.name": "Recommendations [Products]",
        "producing-team": "Data Science"
    }
}

```


**Make sure to produce valid JSON**

The output of your template, when rendered, should be a single valid JSON object that can be parsed, for example, with `json.loads`.


A webhook with this template may be invoked via _any_ of the HTTP methods, including a `GET` request with no body, so the team you are integrating with can include this line at the end of their daily script:

```
curl https://api.prefect.cloud/hooks/AERylZ_uewzpDx-8fcweHQ

```


Each time the script hits the webhook, the webhook will produce a single Prefect event with that name and resource in your workspace.

### Event fields that Prefect Cloud populates for you

You may notice that you only had to provide the `event` and `resource` definition, which is not a completely fleshed-out event. Prefect Cloud will set default values for any missing fields, such as `occurred` and `id`, so you don't need to set them in your template. Additionally, Prefect Cloud will add the webhook itself as a related resource on all of the events it produces.

If your template does not produce a `payload` field, the `payload` will default to a standard set of debugging information, including the HTTP method, headers, and body.

### Dynamic webhook events

Now let's say that after a few days you and the Data Science team are getting a lot of value from the automations you have set up with the static webhook. You've agreed to upgrade this webhook to handle all of the various models that the team produces. It's time to add some dynamic information to your webhook template.

Your colleagues on the team have adjusted their daily `cron` scripts to `POST` a small body that includes the ID and name of the model that was updated:

```
curl \
    -d "model=recommendations" \
    -d "friendly_name=Recommendations%20[Products]" \
    -X POST https://api.prefect.cloud/hooks/AERylZ_uewzpDx-8fcweHQ

```


This script will send a `POST` request and the body will include a traditional URL-encoded form with two fields describing the model that was updated: `model` and `friendly_name`.
Here's a webhook template that uses Jinja2 to receive these values and produce different events for the different models:

```
{
    "event": "model.refreshed",
    "resource": {
        "prefect.resource.id": "product.models.{{ body.model }}",
        "prefect.resource.name": "{{ body.friendly_name }}",
        "producing-team": "Data Science"
    }
}

```


All subsequent `POST` requests will produce events with those variable resource IDs and names. The other statically defined parts, such as the `event` name or the `producing-team` label you included earlier, will still be used.

**Use Jinja2's `default` filter to handle missing values**

Jinja2 has a helpful [`default`](https://jinja.palletsprojects.com/en/3.1.x/templates/#jinja-filters.default) filter that can compensate for missing values in the request. In this example, you may want to use the model's ID in place of the friendly name when the friendly name is not provided: `{{ body.friendly_name|default(body.model) }}`.


### How HTTP request components are handled

The Jinja2 template context includes the three parts of the incoming HTTP request:

* `method` is the uppercased string of the HTTP method, like `GET` or `POST`.
* `headers` is a case-insensitive dictionary of the HTTP headers included with the request. To prevent accidental disclosures, the `Authorization` header is removed.
* `body` represents the body that was posted to the webhook, with a best-effort approach to parse it into an object you can access.

HTTP headers are available without any alteration as a `dict`-like object, but you may access them with header names in any case. For example, these template expressions all return the value of the `Content-Length` header:

```
{{ headers['Content-Length'] }}

{{ headers['content-length'] }}

{{ headers['CoNtEnt-LeNgTh'] }}

```


The HTTP request body goes through some light preprocessing to make it more useful in templates. If the `Content-Type` of the request is `application/json`, the body will be parsed as a JSON object and made available to the webhook templates. If the `Content-Type` is `application/x-www-form-urlencoded` (as in our example above), the body is parsed into a flat `dict`-like object of key-value pairs. Jinja2 supports both index and attribute access to the fields of these objects, so the following two expressions are equivalent:

```
{{ body['friendly_name'] }}

{{ body.friendly_name }}

```


**Only for Python identifiers**

Jinja2's syntax only allows attribute-like access if the key is a valid Python identifier, so `body.friendly-name` will not work. Use `body['friendly-name']` in those cases.

You may not have much control over the client invoking your webhook, but you may still want bodies that look like JSON to be parsed as such. Prefect Cloud will attempt to parse any other content type (like `text/plain`) as if it were JSON first. In any case where the body cannot be transformed into JSON, it will be made available to your templates as a Python `str`.

### Accepting Prefect events directly

In cases where you have more control over the client, your webhook can accept Prefect events directly with a simple pass-through template that takes the incoming body (assuming it is valid JSON) and passes it through unmodified.
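One minimal sketch of such a template, using Jinja2's built-in `tojson` filter:

```
{{ body|tojson }}

```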
This allows a `POST` of a partial Prefect event as in this example: + +``` +POST /hooks/AERylZ_uewzpDx-8fcweHQ HTTP/1.1 +Host: api.prefect.cloud +Content-Type: application/json +Content-Length: 228 + +{ + "event": "model.refreshed", + "resource": { + "prefect.resource.id": "product.models.recommendations", + "prefect.resource.name": "Recommendations [Products]", + "producing-team": "Data Science" + } +} + +``` + + +The resulting event will be filled out with the default values for `occurred`, `id`, and other fields as described [above](#event-fields-that-prefect-cloud-populates-for-you). + +### Accepting CloudEvents + +The [Cloud Native Computing Foundation](https://cncf.io/) has standardized [CloudEvents](https://cloudevents.io/) for use by systems to exchange event information in a common format. These events are supported by major cloud providers and a growing number of cloud-native systems. Prefect Cloud can interpret a webhook containing a CloudEvent natively with the following template: + +``` +{{ body|from_cloud_event(headers) }} + +``` + + +The resulting event will use the CloudEvent's `subject` as the resource (or the `source` if no `subject` is available). The CloudEvent's `data` attribute will become the Prefect event's `payload['data']`, and the other CloudEvent metadata will be at `payload['cloudevents']`. If you would like to handle CloudEvents in a more specific way tailored to your use case, use a dynamic template to interpret the incoming `body`. + +Troubleshooting +----------------------------------------------------- + +The initial configuration of your webhook may require some trial and error as you get the sender and your receiving webhook speaking a compatible language. While you are in this phase, you may find the [Event Feed](https://docs.prefect.io/cloud/events/#event-feed) in the UI to be indispensable for seeing the events as they are happening. + +When Prefect Cloud encounters an error during receipt of a webhook, it will produce a `prefect-cloud.webhook.failed` event in your workspace. This event will include critical information about the HTTP method, headers, and body it received, as well as what the template rendered. Keep an eye out for these events when something goes wrong. diff --git a/docs/2.19.x/how-to-guides/execution/big-data.mdx b/docs/2.19.x/how-to-guides/execution/big-data.mdx new file mode 100644 index 000000000000..73b6583f655d --- /dev/null +++ b/docs/2.19.x/how-to-guides/execution/big-data.mdx @@ -0,0 +1,110 @@ +--- +title: Big Data +description: In this guide you'll learn tips for working with large amounts of data in Prefect. +--- + +Big data doesn't have a widely accepted, precise definition. In this guide, we'll discuss methods to reduce the processing time or memory utilization of Prefect workflows, without editing your Python code. + +Optimizing your Python code with Prefect for big data +--------------------------------------------------------------------------------------------------------------------------------- + +Depending upon your needs, you may want to optimize your Python code for speed, memory, compute, or disk space. + +Prefect provides several options that we'll explore in this guide: + +1. Remove task introspection with `quote` to save time running your code. +2. Write task results to cloud storage such as S3 using a block to save memory. +3. Save data to disk within a flow rather than using results. +4. Cache task results to save time and compute. +5. Compress results written to disk to save space. +6. 
Use a [task runner](https://docs.prefect.io/concepts/task-runners/) for parallelizable operations to save time.

### Remove task introspection

By default, Prefect introspects each argument when a task is called from a flow. To speed up your flow runs, you can disable this behavior for a task by wrapping the argument using [`quote`](https://docs.prefect.io/latest/api-ref/prefect/utilities/annotations/#prefect.utilities.annotations.quote).

To demonstrate, let's use a basic example that extracts and transforms some New York taxi data.



```python et_quote.py
from prefect import task, flow
from prefect.utilities.annotations import quote
import pandas as pd


@task
def extract(url: str):
    """Extract data"""
    df_raw = pd.read_parquet(url)
    print(df_raw.info())
    return df_raw


@task
def transform(df: pd.DataFrame):
    """Basic transformation"""
    df["tip_fraction"] = df["tip_amount"] / df["total_amount"]
    print(df.info())
    return df


@flow(log_prints=True)
def et(url: str):
    """ET pipeline"""
    df_raw = extract(url)
    df = transform(quote(df_raw))


if __name__ == "__main__":
    url = "https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2023-09.parquet"
    et(url)

```


Introspection can take significant time when the object being passed is a large collection, such as a dictionary or DataFrame, where each element needs to be visited. Note that using `quote` reduces execution time at the expense of disabling task dependency tracking for the wrapped object.

### Write task results to cloud storage

By default, the results of task runs are stored in memory in your execution environment. This behavior makes flow runs fast for small data, but can be problematic for large data. You can save memory by writing results to disk. In production, you'll generally want to write results to a cloud provider storage such as AWS S3. Prefect lets you use a storage block from a Prefect cloud integration library such as [prefect-aws](https://prefecthq.github.io/prefect-aws/) to save your configuration information. Learn more about blocks [here](https://docs.prefect.io/concepts/blocks/).

Install the relevant library, register the block with the server, and create your storage block. Then you can reference the block in your flow like this:

```python
from prefect import task
from prefect_aws.s3 import S3Bucket

my_s3_block = S3Bucket.load("MY_BLOCK_NAME")

@task(result_storage=my_s3_block)
def my_task():
    ...

```


Now the result of the task will be written to S3, rather than stored in memory.

### Save data to disk within a flow

To save memory and time with big data, you don't need to pass results between tasks at all. Instead, you can write and read data to disk directly in your flow code. Prefect has integration libraries for each of the major cloud providers. Each library contains blocks with methods that make it convenient to read and write data to and from cloud object storage. The [moving data guide](https://docs.prefect.io/guides/moving-data/) has step-by-step examples for each cloud provider.

### Cache task results

Caching allows you to avoid re-running tasks when doing so is unnecessary. Caching can save you time and compute. Note that caching requires task result persistence. Caching is discussed in detail in the [tasks concept page](https://docs.prefect.io/concepts/tasks.md/#caching).

### Compress results written to disk

If you're using Prefect's task result persistence, you can save disk space by compressing the results.
You just need to prefix the result serializer with `compressed/`, like this:

```
@task(result_serializer="compressed/json")

```


Read about [compressing results with Prefect](https://docs.prefect.io/concepts/results/) for more details. The tradeoff of using compression is that it takes time to compress and decompress the data.

### Use a task runner for parallelizable operations

Prefect's task runners allow you to use the Dask and Ray Python libraries to run tasks in parallel, distributed across multiple machines. This can save you time and compute when operating on large data structures. See the [guide to working with Dask and Ray Task Runners](https://docs.prefect.io/guides/dask-ray-task-runners/) for details.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/dask--ray.mdx b/docs/2.19.x/how-to-guides/execution/dask--ray.mdx
new file mode 100644
index 000000000000..2f67d9222370
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/dask--ray.mdx
---
title: Dask and Ray Task Runners
sidebarTitle: Dask and Ray
---

Task runners provide an execution environment for tasks. In a flow decorator, you can specify a task runner to run the tasks called in that flow.

The default task runner is the [`ConcurrentTaskRunner`](https://docs.prefect.io/api-ref/prefect/task-runners/#prefect.task_runners.ConcurrentTaskRunner).


**Use `.submit` to run your tasks asynchronously**


To run tasks asynchronously, use the `.submit` method when you call them. If you call a task as you would normally in Python code it will run synchronously, even if you are calling the task within a flow that uses the `ConcurrentTaskRunner`, `DaskTaskRunner`, or `RayTaskRunner`.

Many real-world data workflows benefit from true parallel, distributed task execution. For these use cases, the following Prefect-developed task runners for parallel task execution may be installed as [Prefect Integrations](https://docs.prefect.io/integrations/catalog/).

* [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/) runs tasks requiring parallel execution using [`dask.distributed`](http://distributed.dask.org/).
* [`RayTaskRunner`](https://prefecthq.github.io/prefect-ray/) runs tasks requiring parallel execution using [Ray](https://www.ray.io/).

These task runners can spin up a local Dask cluster or Ray instance on the fly, or let you connect with a Dask or Ray environment you've set up separately. Then you can take advantage of massively parallel computing environments.

Use Dask or Ray in your flows to choose the execution environment that fits your particular needs.

To show you how they work, let's start small.


**Remote storage**

We recommend configuring [remote file storage](https://docs.prefect.io/concepts/storage/) for task execution with `DaskTaskRunner` or `RayTaskRunner`. This ensures tasks executing in Dask or Ray have access to task result storage, particularly when accessing a Dask or Ray instance outside of your execution environment.


Configure a task runner
---------------------------------------------------------------------

You may have seen this briefly in a previous tutorial, but let's look a bit more closely at how you can configure a specific task runner for a flow.

Let's start with the [`SequentialTaskRunner`](https://docs.prefect.io/api-ref/prefect/task-runners/#prefect.task_runners.SequentialTaskRunner).
This task runner runs all tasks synchronously and may be useful when used as a debugging tool in conjunction with async code.

Let's start with this simple flow. We import the `SequentialTaskRunner`, specify a `task_runner` on the flow, and call the tasks with `.submit()`.

```python
from prefect import flow, task
from prefect.task_runners import SequentialTaskRunner


@task
def say_hello(name):
    print(f"hello {name}")


@task
def say_goodbye(name):
    print(f"goodbye {name}")


@flow(task_runner=SequentialTaskRunner())
def greetings(names):
    for name in names:
        say_hello.submit(name)
        say_goodbye.submit(name)

if __name__ == "__main__":
    greetings(["arthur", "trillian", "ford", "marvin"])

```


Save this code as `sequential_flow.py` and run it.

```
python sequential_flow.py

```


If you remove the log messages from the output and just look at the printed output from the task runs, you see they're executed sequentially:

```
hello arthur
goodbye arthur
hello trillian
goodbye trillian
hello ford
goodbye ford
hello marvin
goodbye marvin

```


Run tasks in parallel with Dask
-------------------------------------------------------------------------------------

This basic flow won't benefit from parallel execution, but let's proceed so you can see just how simple it is to use the [`DaskTaskRunner`](https://prefecthq.github.io/prefect-dask/) for more complex flows.

Configure your flow to use the `DaskTaskRunner`:

1. Make sure the `prefect-dask` collection is installed by running `pip install -U prefect-dask`.
2. In your flow code, import `DaskTaskRunner` from `prefect_dask.task_runners`.
3. Assign it as the task runner when the flow is defined using the `task_runner=DaskTaskRunner` argument.
4. Use the `.submit` method when calling task-decorated functions.

Save this example code as `dask_flow.py`:

```python
from prefect import flow, task
from prefect_dask.task_runners import DaskTaskRunner


@task
def say_hello(name):
    print(f"hello {name}")


@task
def say_goodbye(name):
    print(f"goodbye {name}")


@flow(task_runner=DaskTaskRunner())
def greetings(names):
    for name in names:
        say_hello.submit(name)
        say_goodbye.submit(name)


if __name__ == "__main__":
    greetings(["arthur", "trillian", "ford", "marvin"])

```


Note that, because you're using `DaskTaskRunner` in a script, you must use `if __name__ == "__main__":` or you'll see warnings and errors.

Run `dask_flow.py` with `python dask_flow.py`. If you get a warning about accepting incoming network connections, that's okay; everything is local in this example.

`DaskTaskRunner` automatically creates a local Dask cluster, then starts executing all of the task runs in parallel. The results do not return in the same order as the sequential code above.

Abbreviated output:

```
goodbye marvin
hello arthur
goodbye ford
hello trillian

```


Notice what happens if you do not use the `submit` method when calling tasks:

```python
from prefect import flow, task
from prefect_dask.task_runners import DaskTaskRunner


@task
def say_hello(name):
    print(f"hello {name}")


@task
def say_goodbye(name):
    print(f"goodbye {name}")


@flow(task_runner=DaskTaskRunner())
def greetings(names):
    for name in names:
        say_hello(name)
        say_goodbye(name)


if __name__ == "__main__":
    greetings(["arthur", "trillian", "ford", "marvin"])

```


Run the script again with `python dask_flow.py`. Once again, the tasks run sequentially. Here's the output with logs removed.
```
hello arthur
goodbye arthur
hello trillian
goodbye trillian
hello ford
goodbye ford
hello marvin
goodbye marvin

```


The task runs are not submitted to the `DaskTaskRunner`; instead, they run sequentially.

Run tasks in parallel with Ray
-----------------------------------------------------------------------------------

You can easily switch to Ray as another parallel task runner option. Use the [`RayTaskRunner`](https://prefecthq.github.io/prefect-ray/) instead of `DaskTaskRunner`.

To configure your flow to use the `RayTaskRunner`:

1. Install `prefect-ray` into your environment with `pip install -U prefect-ray`.
2. In your flow code, import `RayTaskRunner` from `prefect_ray.task_runners`.
3. Specify the task runner when the flow is defined using the `task_runner=RayTaskRunner` argument.


**Ray environment limitations**

While we're excited about parallel task execution via Ray, there are a few limitations with Ray you should be aware of:

* Support for Python 3.11 is [experimental](https://docs.ray.io/en/latest/ray-overview/installation.html#install-nightlies).
* Ray does not support non-x86/64 architectures such as ARM/M1 processors when installed from `pip` alone, so Ray will be skipped during installation of Prefect on those platforms. It is possible to manually install the blocking component with `conda`. See the [Ray documentation](https://docs.ray.io/en/latest/ray-overview/installation.html#m1-mac-apple-silicon-support) for instructions.
* Ray's Windows support is currently in beta.

See the [Ray installation documentation](https://docs.ray.io/en/latest/ray-overview/installation.html) for further compatibility information.


Save this code in `ray_flow.py`.

```python
from prefect import flow, task
from prefect_ray.task_runners import RayTaskRunner

@task
def say_hello(name):
    print(f"hello {name}")

@task
def say_goodbye(name):
    print(f"goodbye {name}")

@flow(task_runner=RayTaskRunner())
def greetings(names):
    for name in names:
        say_hello.submit(name)
        say_goodbye.submit(name)

if __name__ == "__main__":
    greetings(["arthur", "trillian", "ford", "marvin"])

```


Now run `ray_flow.py`. `RayTaskRunner` automatically creates a local Ray instance, then immediately starts executing all of the tasks in parallel. If you have an existing Ray instance, you can provide the address as a parameter to run tasks in the instance. See [Running tasks on Ray](https://docs.prefect.io/concepts/task-runners/#running_tasks_on_ray) for details.

Using multiple task runners
-----------------------------------------------------------------------------

Many workflows include a variety of tasks, and not all of them benefit from parallel execution. You'll most likely want to use the Dask or Ray task runners and spin up their respective resources only for those tasks that need them.

Because task runners are specified on flows, you can assign different task runners to tasks by using [subflows](https://docs.prefect.io/concepts/flows/#composing-flows) to organize those tasks.

This example uses the same tasks as the previous examples, but on the parent flow `greetings()` we use the default `ConcurrentTaskRunner`. Then we call a `ray_greetings()` subflow that uses the `RayTaskRunner` to execute the same tasks in a Ray instance.
```python
from prefect import flow, task
from prefect_ray.task_runners import RayTaskRunner

@task
def say_hello(name):
    print(f"hello {name}")

@task
def say_goodbye(name):
    print(f"goodbye {name}")

@flow(task_runner=RayTaskRunner())
def ray_greetings(names):
    for name in names:
        say_hello.submit(name)
        say_goodbye.submit(name)

@flow()
def greetings(names):
    for name in names:
        say_hello.submit(name)
        say_goodbye.submit(name)
    ray_greetings(names)

if __name__ == "__main__":
    greetings(["arthur", "trillian", "ford", "marvin"])

```


If you save this as `ray_subflow.py` and run it, you'll see that the flow `greetings` runs as you'd expect for a concurrent flow, then flow `ray-greetings` spins up a Ray instance to run the tasks again.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/docker.mdx b/docs/2.19.x/how-to-guides/execution/docker.mdx
new file mode 100644
index 000000000000..d788839a0479
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/docker.mdx
---
sidebarTitle: Docker
title: Running Flows with Docker
---

In the [Deployments](https://docs.prefect.io/tutorial/deployments/) tutorial, we looked at serving a flow that enables scheduling or creating flow runs via the Prefect API.

With our Python script in hand, we can build a Docker image for our script, allowing us to serve our flow in various remote environments. We'll use Kubernetes in this guide, but you can use any Docker-compatible infrastructure.

In this guide we'll:

* Write a Dockerfile to build an image that stores our Prefect flow code.
* Build a Docker image for our flow.
* Deploy and run our Docker image on a Kubernetes cluster.
* Look at the Prefect-maintained Docker images and discuss options for use.

Note that in this guide we'll create a Dockerfile from scratch. Alternatively, Prefect makes it convenient to build a Docker image as part of deployment creation. You can even include environment variables and specify additional Python packages to install at runtime.

If creating a deployment with a `prefect.yaml` file, the build step makes it easy to customize your Docker image and push it to the registry of your choice. See an example [here](https://docs.prefect.io/guides/deployment/kubernetes/#define-a-deployment).

Deployment creation with a Python script that includes `flow.deploy` similarly allows you to customize your Docker image with keyword arguments as shown below.

```python
...

if __name__ == "__main__":
    hello_world.deploy(
        name="my-first-deployment",
        work_pool_name="above-ground",
        image='my_registry/hello_world:demo',
        job_variables={"env": { "EXTRA_PIP_PACKAGES": "boto3" } }
    )

```


Prerequisites
-------------------------------------------------

To complete this guide, you'll need the following:

* A Python script that defines and serves a flow. We'll use the flow script and deployment from the [Deployments](https://docs.prefect.io/tutorial/deployments/) tutorial.
* Access to a running Prefect API server. You can sign up for a forever free [Prefect Cloud account](https://docs.prefect.io/cloud/) or run a Prefect API server locally with `prefect server start`.
* [Docker Desktop](https://docs.docker.com/desktop/) installed on your machine.

Writing a Dockerfile
---------------------------------------------------------------

First, let's make a clean directory to work from, `prefect-docker-guide`.
```
mkdir prefect-docker-guide
cd prefect-docker-guide

```


In this directory, we'll create a sub-directory named `flows` and put our flow script from the [Deployments](https://docs.prefect.io/tutorial/deployments/) tutorial in it.

```
mkdir flows
cd flows
touch prefect-docker-guide-flow.py

```


Here's the flow code for reference:

prefect-docker-guide-flow.py

```python
import httpx
from prefect import flow


@flow(log_prints=True)
def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
    url = f"https://api.github.com/repos/{repo_name}"
    response = httpx.get(url)
    response.raise_for_status()
    repo = response.json()
    print(f"{repo_name} repository statistics 🤓:")
    print(f"Stars 🌠 : {repo['stargazers_count']}")
    print(f"Forks 🍴 : {repo['forks_count']}")


if __name__ == "__main__":
    get_repo_info.serve(name="prefect-docker-guide")

```


The next file we'll add to the `prefect-docker-guide` directory is a `requirements.txt`. We'll include all dependencies required for our `prefect-docker-guide-flow.py` script in the Docker image we'll build.

```
# ensure you run this line from the top level of the `prefect-docker-guide` directory
touch requirements.txt

```


Here's what we'll put in our `requirements.txt` file:

requirements.txt

```
prefect>=2.12.0
httpx

```


Next, we'll create a `Dockerfile` that we'll use to create a Docker image that will also store the flow code.

We'll add the following content to our `Dockerfile`:

Dockerfile

```
# We're using the latest version of Prefect with Python 3.10
FROM prefecthq/prefect:2-python3.10

# Add our requirements.txt file to the image and install dependencies
COPY requirements.txt .
RUN pip install -r requirements.txt --trusted-host pypi.python.org --no-cache-dir

# Add our flow code to the image
COPY flows /opt/prefect/flows

# Run our flow script when the container starts
CMD ["python", "flows/prefect-docker-guide-flow.py"]

```


Building a Docker image
---------------------------------------------------------------------

Now that we have a Dockerfile we can build our image by running:

```
docker build -t prefect-docker-guide-image .

```


We can check that our build worked by running a container from our new image.

If you're using Prefect Cloud:

Our container will need an API URL and an API key to communicate with Prefect Cloud.

* You can get an API key from the [API Keys](https://docs.prefect.io/2.12.0/cloud/users/api-keys/) section of the user settings in the Prefect UI.

* You can get your API URL by running `prefect config view` and copying the `PREFECT_API_URL` value.


We'll provide both these values to our container by passing them as environment variables with the `-e` flag.

```
docker run -e PREFECT_API_URL=YOUR_PREFECT_API_URL -e PREFECT_API_KEY=YOUR_API_KEY prefect-docker-guide-image

```


After running the above command, the container should start up and serve the flow within the container!

If you're running a self-hosted Prefect server instead:

Our container will need an API URL and network access to communicate with the Prefect API.

For this guide, we'll assume the Prefect API is running on the same machine that we'll run our container on and the Prefect API was started with `prefect server start`. If you're running a different setup, check out the [Hosting a Prefect server guide](https://docs.prefect.io/guides/host/) for information on how to connect to your Prefect API instance.
To ensure that our flow container can communicate with the Prefect API, we'll set our `PREFECT_API_URL` to `http://host.docker.internal:4200/api`. If you're running Linux, you'll need to set your `PREFECT_API_URL` to `http://localhost:4200/api` and use the `--network="host"` option instead.

```
docker run -e PREFECT_API_URL=http://host.docker.internal:4200/api prefect-docker-guide-image

```


After running the above command, the container should start up and serve the flow within the container!

Deploying to a remote environment
-----------------------------------------------------------------------------------------

Now that we have a Docker image with our flow code embedded, we can deploy it to a remote environment!

For this guide, we'll simulate a remote environment by using Kubernetes locally with Docker Desktop. You can use the [instructions provided by Docker to set up Kubernetes locally](https://docs.docker.com/desktop/kubernetes/).

### Creating a Kubernetes deployment manifest

To ensure the process serving our flow is always running, we'll create a [Kubernetes deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). If our flow's container ever crashes, Kubernetes will automatically restart it, ensuring that we won't miss any scheduled runs.

First, we'll create a `deployment-manifest.yaml` file in our `prefect-docker-guide` directory:

```
touch deployment-manifest.yaml

```


And we'll add the following content to our `deployment-manifest.yaml` file. If you're using Prefect Cloud:

```yaml deployment-manifest.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prefect-docker-guide
spec:
  replicas: 1
  selector:
    matchLabels:
      flow: get-repo-info
  template:
    metadata:
      labels:
        flow: get-repo-info
    spec:
      containers:
      - name: flow-container
        image: prefect-docker-guide-image:latest
        env:
        - name: PREFECT_API_URL
          value: YOUR_PREFECT_API_URL
        - name: PREFECT_API_KEY
          value: YOUR_API_KEY
        # Never pull the image because we're using a local image
        imagePullPolicy: Never

```


**Keep your API key secret**

In the above manifest we are passing in the Prefect API URL and API key as environment variables. This approach is simple, but it is not secure. If you are deploying your flow to a remote cluster, you should use a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) to store your API key.

If you're running a self-hosted Prefect server instead:

```yaml deployment-manifest.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prefect-docker-guide
spec:
  replicas: 1
  selector:
    matchLabels:
      flow: get-repo-info
  template:
    metadata:
      labels:
        flow: get-repo-info
    spec:
      containers:
      - name: flow-container
        image: prefect-docker-guide-image:latest
        env:
        - name: PREFECT_API_URL
          value: http://host.docker.internal:4200/api
        # Never pull the image because we're using a local image
        imagePullPolicy: Never

```


**Linux users**

If you're running Linux, you'll need to set your `PREFECT_API_URL` to use the IP address of your machine instead of `host.docker.internal`.

This manifest defines how our image will run when deployed in our Kubernetes cluster. Note that we will be running a single replica of our flow container. If you want to run multiple replicas of your flow container to keep up with an active schedule, or because your flow is resource-intensive, you can increase the `replicas` value.
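As noted above, a more secure option for remote clusters is to store the API key in a Kubernetes secret and reference it from the manifest. A sketch, where the secret name `prefect-api-key` and key `api-key` are arbitrary names chosen for illustration:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: prefect-api-key
type: Opaque
stringData:
  # stringData lets you provide the value unencoded; Kubernetes stores it base64-encoded
  api-key: YOUR_API_KEY

```


The container's `env` entry can then reference the secret instead of an inline value:

```yaml
        env:
          - name: PREFECT_API_KEY
            valueFrom:
              secretKeyRef:
                name: prefect-api-key
                key: api-key

```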
### Deploying our flow to the cluster

Now that we have a deployment manifest, we can deploy our flow to the cluster by running:

```
kubectl apply -f deployment-manifest.yaml

```


We can monitor the status of our Kubernetes deployment by running `kubectl get deployments`.

Once the deployment has successfully started, we can check the logs of our flow container by running the following:

```
kubectl logs -l flow=get-repo-info

```


Now that we're serving our flow in our cluster, we can trigger a flow run by running:

```
prefect deployment run get-repo-info/prefect-docker-guide

```


If we navigate to the URL provided by the `prefect deployment run` command, we can follow the flow run via the logs in the Prefect UI!

Prefect-maintained Docker images
---------------------------------------------------------------------------------------

Every release of Prefect results in several new Docker images. These images are all named [prefecthq/prefect](https://hub.docker.com/r/prefecthq/prefect) and their **tags** identify their differences.

### Image tags

When a release is published, images are built for all of Prefect's supported Python versions. These images are tagged to identify the combination of Prefect and Python versions contained. Additionally, we have "convenience" tags which are updated with each release to facilitate automatic updates.

For example, when release `2.11.5` is published:

1. Images with the release packaged are built for each supported Python version (3.8, 3.9, 3.10, 3.11) with both standard Python and Conda.
2. These images are tagged with the full description, e.g. `prefect:2.11.5-python3.10` and `prefect:2.11.5-python3.10-conda`.
3. For users that want more specific pins, these images are also tagged with the SHA of the git commit of the release, e.g. `sha-88a7ff17a3435ec33c95c0323b8f05d7b9f3f6d2-python3.10`.
4. For users that want to be on the latest `2.11.x` release, receiving patch updates, we update a tag without the patch version to this release, e.g. `prefect:2.11-python3.10`.
5. For users that want to be on the latest `2.x.y` release, receiving minor version updates, we update a tag without the minor or patch version to this release, e.g. `prefect:2-python3.10`.
6. Finally, for users who want the latest `2.x.y` release without specifying a Python version, we update `2-latest` to the image for our highest supported Python version, which in this case would be equivalent to `prefect:2.11.5-python3.11`.


**Choose image versions carefully**

It's a good practice to use Docker images with specific Prefect versions in production.

Use care when employing images that automatically update to new versions (such as `prefecthq/prefect:2-python3.11` or `prefecthq/prefect:2-latest`).


### Standard Python

Standard Python images are based on the official Python `slim` images, e.g. `python:3.10-slim`.
|Tag |Prefect Version |Python Version|
|--------------------------|---------------------------|--------------|
|2-latest |most recent v2 PyPI version|3.10 |
|2-python3.11 |most recent v2 PyPI version|3.11 |
|2-python3.10 |most recent v2 PyPI version|3.10 |
|2-python3.9 |most recent v2 PyPI version|3.9 |
|2-python3.8 |most recent v2 PyPI version|3.8 |
|2.X-python3.11 |2.X |3.11 |
|2.X-python3.10 |2.X |3.10 |
|2.X-python3.9 |2.X |3.9 |
|2.X-python3.8 |2.X |3.8 |
|sha-\<hash\>-python3.11 |\<hash\> |3.11 |
|sha-\<hash\>-python3.10 |\<hash\> |3.10 |
|sha-\<hash\>-python3.9 |\<hash\> |3.9 |
|sha-\<hash\>-python3.8 |\<hash\> |3.8 |


### Conda-flavored Python

Conda-flavored images are based on `continuumio/miniconda3`. Prefect is installed into a conda environment named `prefect`.


|Tag |Prefect Version |Python Version|
|-----------------------------|----------------------------|--------------|
|2-latest-conda |most recent v2 PyPI version |3.10 |
|2-python3.11-conda |most recent v2 PyPI version |3.11 |
|2-python3.10-conda |most recent v2 PyPI version |3.10 |
|2-python3.9-conda |most recent v2 PyPI version |3.9 |
|2-python3.8-conda |most recent v2 PyPI version |3.8 |
|2.X-python3.11-conda |2.X |3.11 |
|2.X-python3.10-conda |2.X |3.10 |
|2.X-python3.9-conda |2.X |3.9 |
|2.X-python3.8-conda |2.X |3.8 |
|sha-\<hash\>-python3.11-conda|\<hash\> |3.11 |
|sha-\<hash\>-python3.10-conda|\<hash\> |3.10 |
|sha-\<hash\>-python3.9-conda |\<hash\> |3.9 |
|sha-\<hash\>-python3.8-conda |\<hash\> |3.8 |


Building your own image
---------------------------------------------------------------------

If your flow relies on dependencies not found in the default `prefecthq/prefect` images, you may want to build your own image. You can either base it off of one of the provided `prefecthq/prefect` images, or build it from a base image of your choice. See the [Work pool deployment guide](https://docs.prefect.io/guides/prefect-deploy/) for discussion of how Prefect can help you build custom images with dependencies specified in a `requirements.txt` file.

By default, Prefect [work pools](https://docs.prefect.io/concepts/work-pools) that use containers refer to the `2-latest` image. You can specify another image at work pool creation. The work pool image choice can be overridden in individual deployments.

### Extending the `prefecthq/prefect` image manually

Here we provide an example `Dockerfile` for building an image based on `prefecthq/prefect:2-latest`, but with `scikit-learn` installed.

```
FROM prefecthq/prefect:2-latest

RUN pip install scikit-learn

```


### Choosing an image strategy

The options described above have different complexity (and performance) characteristics. For choosing a strategy, we provide the following recommendations:

* If your flow only makes use of tasks defined in the same file as the flow, or tasks that are part of `prefect` itself, then you can rely on the default provided `prefecthq/prefect` image.

* If your flow requires a few extra dependencies found on PyPI, you can use the default `prefecthq/prefect` image and set `prefect.deployments.steps.pip_install_requirements:` in the `pull` step to install these dependencies at runtime.

* If the installation process requires compiling code or other expensive operations, you may be better off building a custom image instead.

* If your flow (or flows) require extra dependencies or shared libraries, we recommend building a shared custom image with all the extra dependencies and shared task definitions you need. Your flows can then all rely on the same image, but have their source stored externally.
This option can ease development, as the shared image only needs to be rebuilt when dependencies change, not when the flow source changes.


Next steps
-------------------------------------------

We only served a single flow in this guide, but you can extend this setup to serve multiple flows in a single Docker image by updating your Python script to use `flow.to_deployment` and `serve` to [serve multiple flows or the same flow with different configuration](https://docs.prefect.io/concepts/flows#serving-multiple-flows-at-once).

To learn more about deploying flows, check out the [Deployments](https://docs.prefect.io/concepts/deployments/) concept doc!

For advanced infrastructure requirements, such as executing each flow run within its own dedicated Docker container, learn more in the [Work pool deployment guide](https://docs.prefect.io/guides/prefect-deploy/).
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/logging.mdx b/docs/2.19.x/how-to-guides/execution/logging.mdx
new file mode 100644
index 000000000000..fc570edc340b
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/logging.mdx
---
title: Logging
description: Prefect enables you to log a variety of useful information about your flow and task runs, capturing information about your workflows for purposes such as monitoring, troubleshooting, and auditing.
---

Prefect captures logs for your flow and task runs by default, even if you have not started a Prefect server with `prefect server start`.

You can view and filter logs in the [Prefect UI](https://docs.prefect.io/ui/flow-runs/#inspect-a-flow-run) or Prefect Cloud, or access log records via the API.

Prefect enables fine-grained customization of log levels for flows and tasks, including configuration for default levels and log message formatting.

Logging overview
-------------------------------------------------------

Whenever you run a flow, Prefect automatically logs events for flow runs and task runs, along with any custom log handlers you have configured. No configuration is needed to enable Prefect logging.

For example, say you created a simple flow in a file `flow.py`. If you create a local flow run with `python flow.py`, you'll see an example of the log messages created automatically by Prefect:

```
16:45:44.534 | INFO | prefect.engine - Created flow run 'gray-dingo' for flow
'hello-flow'
16:45:44.534 | INFO | Flow run 'gray-dingo' - Using task runner 'SequentialTaskRunner'
16:45:44.598 | INFO | Flow run 'gray-dingo' - Created task run 'hello-task-54135dc1-0'
for task 'hello-task'
Hello world!
16:45:44.650 | INFO | Task run 'hello-task-54135dc1-0' - Finished in state
Completed(None)
16:45:44.672 | INFO | Flow run 'gray-dingo' - Finished in state
Completed('All states completed.')

```


You can see logs for a flow run in the Prefect UI by navigating to the [**Flow Runs**](https://docs.prefect.io/ui/flow-runs/#inspect-a-flow-run) page and selecting a specific flow run to inspect.

![Viewing logs for a flow run in the Prefect UI](/images/logging1.png)

These log messages reflect the logging configuration for log levels and message formatters. You may customize the log levels captured and the default message format through configuration, and you can capture custom logging events by explicitly emitting log messages during flow and task runs.

Prefect supports the standard Python logging levels `CRITICAL`, `ERROR`, `WARNING`, `INFO`, and `DEBUG`.
+By default, Prefect displays `INFO`-level and above events. You can configure the root logging level as well as specific logging levels for flow and task runs.
+
+Logging configuration
+-----------------------------------------------------------------
+
+### Logging settings
+
+Prefect provides several settings for configuring [logging level and loggers](https://docs.prefect.io/concepts/logs/).
+
+By default, Prefect displays `INFO`-level and above logging records. If you change this level to `DEBUG`, `DEBUG`-level logs created by Prefect will be shown as well. You may need to change the log level used by loggers from other libraries to see their log records.
+
+You can override any logging configuration by setting an environment variable or [Prefect Profile](https://docs.prefect.io/concepts/settings/) setting using the syntax `PREFECT_LOGGING_[PATH]_[TO]_[KEY]`, with `[PATH]_[TO]_[KEY]` corresponding to the nested address of any setting.
+
+For example, to change the default logging level for Prefect to `DEBUG`, you can set the environment variable `PREFECT_LOGGING_LEVEL="DEBUG"`.
+
+You may also configure the "root" Python logger. The root logger receives logs from all loggers unless they explicitly opt out by disabling propagation. By default, the root logger is configured to output `WARNING`-level logs to the console. As with other logging settings, you can override this from the environment or in the logging configuration file. For example, you can change the level with the variable `PREFECT_LOGGING_ROOT_LEVEL`.
+
+You may adjust the log level used by specific handlers. For example, you could set `PREFECT_LOGGING_HANDLERS_API_LEVEL=ERROR` to have only `ERROR` logs reported to the Prefect API. The console handlers will still default to level `INFO`.
+
+There is a [`logging.yml`](https://github.com/PrefectHQ/prefect/blob/main/src/prefect/logging/logging.yml) file packaged with Prefect that defines the default logging configuration.
+
+You can customize logging configuration by creating your own version of `logging.yml` with custom settings, by either creating the file at the default location (`~/.prefect/logging.yml`) or by specifying the path to the file with `PREFECT_LOGGING_SETTINGS_PATH`. (If the file does not exist at the specified location, Prefect ignores the setting and uses the default configuration.)
+
+See the Python [Logging configuration](https://docs.python.org/3/library/logging.config.html#logging.config.dictConfig) documentation for more information about the configuration options and syntax used by `logging.yml`.
+
+Prefect loggers
+-----------------------------------------------------
+
+To access the Prefect logger, use `from prefect import get_run_logger`. You can send messages to the logger in both flows and tasks.
+
+### Logging in flows
+
+To log from a flow, retrieve a logger instance with `get_run_logger()`, then call the standard Python [logging methods](https://docs.python.org/3/library/logging.html).
+
+```python
+from prefect import flow, get_run_logger
+
+@flow(name="log-example-flow")
+def logger_flow():
+    logger = get_run_logger()
+    logger.info("INFO level log message.")
+
+```
+
+
+Prefect automatically uses the flow run logger based on the flow context. If you run the above code, Prefect captures the following as a log event.
+
+```
+15:35:17.304 | INFO | Flow run 'mottled-marten' - INFO level log message.
+
+```
+
+
+The default flow run log formatter uses the flow run name for log messages.
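+Because the run logger is resolved from the active run context, calling `get_run_logger()` with no flow or task run in progress raises an error (distinct from the API-logging warning described in the note below). A minimal sketch of that behavior, assuming only that `prefect` is installed:
+
+```python
+from prefect import get_run_logger
+from prefect.exceptions import MissingContextError
+
+try:
+    # no flow or task run is active here, so there is no run context
+    get_run_logger()
+except MissingContextError:
+    print("get_run_logger() only works inside a flow or task run")
+```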
+
+**Note**
+
+Starting in 2.7.11, if you use a logger that sends logs to the API outside of a flow or task run, a warning will be displayed instead of an error. You can silence this warning by setting `PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW=ignore` or have the logger raise an error by setting the value to `error`.
+
+
+### Logging in tasks
+
+Logging in tasks works much like logging in flows: retrieve a logger instance with `get_run_logger()`, then call the standard Python [logging methods](https://docs.python.org/3/library/logging.html).
+
+```python
+from prefect import flow, task, get_run_logger
+
+@task(name="log-example-task")
+def logger_task():
+    logger = get_run_logger()
+    logger.info("INFO level log message from a task.")
+
+@flow(name="log-example-flow")
+def logger_flow():
+    logger_task()
+
+```
+
+
+Prefect automatically uses the task run logger based on the task context. The default task run log formatter uses the task run name for log messages.
+
+```
+15:33:47.179 | INFO | Task run 'logger_task-80a1ffd1-0' - INFO level log message from a task.
+
+```
+
+
+The underlying log model for task runs captures the task name, task run ID, and parent flow run ID, which are persisted to the database for reporting and may also be used in custom message formatting.
+
+### Logging print statements
+
+Prefect provides the `log_prints` option to enable the logging of `print` statements at the task or flow level. When `log_prints=True` for a given task or flow, the Python builtin `print` will be patched to redirect to the Prefect logger for the scope of that task or flow.
+
+By default, tasks and subflows will inherit the `log_prints` setting from their parent flow, unless opted out with their own explicit `log_prints` setting.
+
+```python
+from prefect import task, flow
+
+@task
+def my_task():
+    print("we're logging print statements from a task")
+
+@flow(log_prints=True)
+def my_flow():
+    print("we're logging print statements from a flow")
+    my_task()
+
+```
+
+
+This will output:
+
+```
+15:52:11.244 | INFO | prefect.engine - Created flow run 'emerald-gharial' for flow 'my-flow'
+15:52:11.812 | INFO | Flow run 'emerald-gharial' - we're logging print statements from a flow
+15:52:11.926 | INFO | Flow run 'emerald-gharial' - Created task run 'my_task-20c6ece6-0' for task 'my_task'
+15:52:11.927 | INFO | Flow run 'emerald-gharial' - Executing 'my_task-20c6ece6-0' immediately...
+15:52:12.217 | INFO | Task run 'my_task-20c6ece6-0' - we're logging print statements from a task
+
+```
+
+
+```python
+from prefect import task, flow
+
+@task(log_prints=False)
+def my_task():
+    print("not logging print statements in this task")
+
+@flow(log_prints=True)
+def my_flow():
+    print("we're logging print statements from a flow")
+    my_task()
+
+```
+
+
+Using `log_prints=False` at the task level will output:
+
+```
+15:52:11.244 | INFO | prefect.engine - Created flow run 'emerald-gharial' for flow 'my-flow'
+15:52:11.812 | INFO | Flow run 'emerald-gharial' - we're logging print statements from a flow
+15:52:11.926 | INFO | Flow run 'emerald-gharial' - Created task run 'my_task-20c6ece6-0' for task 'my_task'
+15:52:11.927 | INFO | Flow run 'emerald-gharial' - Executing 'my_task-20c6ece6-0' immediately...
+not logging print statements in this task
+
+```
+
+
+You can also configure this behavior globally for all Prefect flows, tasks, and subflows.
+
+```
+prefect config set PREFECT_LOGGING_LOG_PRINTS=True
+
+```
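+As a sketch of the inheritance behavior described above, the subflow below picks up `log_prints=True` from its parent, while the task opts out with its own explicit setting (flow and task names here are illustrative):
+
+```python
+from prefect import flow, task
+
+@task(log_prints=False)  # explicitly opt this task out of print logging
+def quiet_task():
+    print("this print is not captured as a log record")
+
+@flow  # inherits log_prints=True when called from the parent flow
+def child_flow():
+    print("captured via inheritance from the parent flow")
+
+@flow(log_prints=True)
+def parent_flow():
+    print("captured by the parent flow")
+    quiet_task()
+    child_flow()
+
+if __name__ == "__main__":
+    parent_flow()
+```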
+
+
+Formatters
+-------------------------------------------
+
+Prefect log formatters specify the format of log messages. You can see details of message formatting for different loggers in [`logging.yml`](https://github.com/PrefectHQ/prefect/blob/main/src/prefect/logging/logging.yml). For example, the default formatting for task run log records is:
+
+```
+"%(asctime)s.%(msecs)03d | %(levelname)-7s | Task run %(task_run_name)r - %(message)s"
+
+```
+
+
+The variables available to interpolate in log messages vary by logger. In addition to the run context, message string, and any keyword arguments, flow and task run loggers have access to additional variables.
+
+The flow run logger has the following:
+
+* `flow_run_name`
+* `flow_run_id`
+* `flow_name`
+
+The task run logger has the following:
+
+* `task_run_id`
+* `flow_run_id`
+* `task_run_name`
+* `task_name`
+* `flow_run_name`
+* `flow_name`
+
+You can specify custom formatting by setting an environment variable or by modifying the formatter in a `logging.yml` file as described earlier. For example, to change the formatting for the flow runs formatter:
+
+```
+PREFECT_LOGGING_FORMATTERS_STANDARD_FLOW_RUN_FMT="%(asctime)s.%(msecs)03d | %(levelname)-7s | %(flow_run_id)s - %(message)s"
+
+```
+
+
+The resulting messages, using the flow run ID instead of the name, would look like this:
+
+```
+10:40:01.211 | INFO | e43a5a80-417a-41c4-a39e-2ef7421ee1fc - Created task run
+'othertask-1c085beb-3' for task 'othertask'
+
+```
+
+
+Styles
+-----------------------------------
+
+By default, Prefect highlights specific keywords in the console logs with a variety of colors.
+
+Highlighting can be toggled on/off with the `PREFECT_LOGGING_COLORS` setting, e.g.
+
+```
+PREFECT_LOGGING_COLORS=False
+
+```
+
+
+You can change what gets highlighted and also adjust the colors by updating the styles in a `logging.yml` file. The following keys are built into the `PrefectConsoleHighlighter`.
+
+URLs:
+
+* `log.web_url`
+* `log.local_url`
+
+Log levels:
+
+* `log.info_level`
+* `log.warning_level`
+* `log.error_level`
+* `log.critical_level`
+
+State types:
+
+* `log.pending_state`
+* `log.running_state`
+* `log.scheduled_state`
+* `log.completed_state`
+* `log.cancelled_state`
+* `log.failed_state`
+* `log.crashed_state`
+
+Flow (run) names:
+
+* `log.flow_run_name`
+* `log.flow_name`
+
+Task (run) names:
+
+* `log.task_run_name`
+* `log.task_name`
+
+You can also build your own handler with a [custom highlighter](https://rich.readthedocs.io/en/stable/highlighting.html#custom-highlighters). For example, to additionally highlight emails:
+
+1. Copy and paste the following into `my_package_or_module.py` (rename as needed) in the same directory as the flow run script; or, ideally, make it part of a Python package so it's available in `site-packages` and can be accessed anywhere within your environment.
+
+```python
+import logging
+from typing import Dict, Union
+
+from rich.highlighter import Highlighter
+
+from prefect.logging.handlers import PrefectConsoleHandler
+from prefect.logging.highlighters import PrefectConsoleHighlighter
+
+class CustomConsoleHighlighter(PrefectConsoleHighlighter):
+    base_style = "log."
+    highlights = PrefectConsoleHighlighter.highlights + [
+        # ?P<email> names this expression `email`
+        r"(?P<email>[\w-]+@([\w-]+\.)+[\w-]+)",
+    ]
+
+class CustomConsoleHandler(PrefectConsoleHandler):
+    def __init__(
+        self,
+        highlighter: Highlighter = CustomConsoleHighlighter,
+        styles: Dict[str, str] = None,
+        level: Union[int, str] = logging.NOTSET,
+    ):
+        super().__init__(highlighter=highlighter, styles=styles, level=level)
+
+```
+
+
+2. Update `~/.prefect/logging.yml` to use `my_package_or_module.CustomConsoleHandler` and additionally reference the `base_style` and named expression: `log.email`.
+
+```
+    console_flow_runs:
+        level: 0
+        class: my_package_or_module.CustomConsoleHandler
+        formatter: flow_runs
+        styles:
+            log.email: magenta
+            # other styles can be appended here, e.g.
+            # log.completed_state: green
+
+```
+
+
+3. Then, on your next flow run, text that looks like an email will be highlighted; for example, `my@email.com` is colored in magenta here.
+
+```python
+from prefect import flow, get_run_logger
+
+@flow
+def log_email_flow():
+    logger = get_run_logger()
+    logger.info("my@email.com")
+
+log_email_flow()
+
+```
+
+
+Applying markup in logs
+---------------------------------------------------------------------
+
+To use [Rich's markup](https://rich.readthedocs.io/en/stable/markup.html#console-markup) in Prefect logs, first configure `PREFECT_LOGGING_MARKUP`.
+
+```
+PREFECT_LOGGING_MARKUP=True
+
+```
+
+
+Then, the following will highlight "fancy" in red.
+
+```python
+from prefect import flow, get_run_logger
+
+@flow
+def my_flow():
+    logger = get_run_logger()
+    logger.info("This is [bold red]fancy[/]")
+
+my_flow()
+
+```
+
+
+**Inaccurate logs could result**
+
+
+Although this can be convenient, the downside is that, if enabled, strings containing square brackets may be inaccurately interpreted and lead to incomplete output. For example, `DROP TABLE [dbo].[SomeTable];` outputs `DROP TABLE .[SomeTable];`.
+
+Log database schema
+-------------------------------------------------------------
+
+Logged events are also persisted to the Prefect database. A log record includes the following data:
+
+
+| Column | Description |
+|-------------|------------------------------------------------------------------------------------------------------------------------------|
+| id | Primary key ID of the log record. |
+| created | Timestamp specifying when the record was created. |
+| updated | Timestamp specifying when the record was updated. |
+| name | String specifying the name of the logger. |
+| level | Integer representation of the logging level. |
+| flow_run_id | ID of the flow run associated with the log record. If the log record is for a task run, this is the parent flow of the task. |
+| task_run_id | ID of the task run associated with the log record. Null if logging a flow run event. |
+| message | Log message. |
+| timestamp | The client-side timestamp of this logged statement. |
+
+
+For more information, see [Log schema](https://docs.prefect.io/api-ref/server/schemas/core/#prefect.server.schemas.core.Log) in the API documentation.
+
+Including logs from other libraries
+---------------------------------------------------------------------------------------------
+
+By default, Prefect won't capture log statements from libraries that your flows and tasks use. You can tell Prefect to include logs from these libraries with the `PREFECT_LOGGING_EXTRA_LOGGERS` setting.
+
+To use this setting, specify one or more Python library names to include, separated by commas. For example, if you want to make sure Prefect captures Dask and SciPy logging statements with your flow and task run logs:
+
+```
+PREFECT_LOGGING_EXTRA_LOGGERS=dask,scipy
+
+```
+
+
+You can set this setting as an environment variable or in a profile. See [Settings](https://docs.prefect.io/concepts/settings/) for more details about how to use settings.
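+As a sketch of the effect, suppose `PREFECT_LOGGING_EXTRA_LOGGERS=custom_lib` is set in the environment (`custom_lib` is a hypothetical library name). Records emitted through that library's standard Python logger are then captured alongside your flow run logs:
+
+```python
+import logging
+
+from prefect import flow
+
+# a real library would typically create this logger at module import time
+library_logger = logging.getLogger("custom_lib")
+
+@flow
+def my_flow():
+    # with PREFECT_LOGGING_EXTRA_LOGGERS=custom_lib, this record is
+    # captured and shipped with the flow run logs
+    library_logger.info("hello from a third-party library")
+
+if __name__ == "__main__":
+    my_flow()
+```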
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/managed-execution.mdx b/docs/2.19.x/how-to-guides/execution/managed-execution.mdx
new file mode 100644
index 000000000000..ac6f37ca9a97
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/managed-execution.mdx
@@ -0,0 +1,132 @@
+---
+title: Managed Execution
+---
+
+Prefect Cloud can run your flows on your behalf with Prefect Managed work pools. Flows run with this work pool do not require a worker or cloud provider account. Prefect handles the infrastructure and code execution for you.
+
+Managed execution is a great option for users who want to get started quickly, with no infrastructure setup.
+
+
+**Managed Execution is in beta**
+
+Managed Execution is currently in beta. Features are likely to change without warning.
+
+
+Usage guide
+---------------------------------------------
+
+Run a flow with managed infrastructure in three steps.
+
+### Step 1
+
+Create a new work pool of type Prefect Managed in the UI or the CLI. Here's the command to create a new work pool using the CLI:
+
+```
+prefect work-pool create my-managed-pool --type prefect:managed
+
+```
+
+
+### Step 2
+
+Create a deployment using the flow `deploy` method or `prefect.yaml`.
+
+Specify the name of your managed work pool, as shown in this example that uses the `deploy` method:
+
+managed-execution.py
+
+```python
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/prefecthq/demo.git",
+        entrypoint="flow.py:my_flow",
+    ).deploy(
+        name="test-managed-flow",
+        work_pool_name="my-managed-pool",
+    )
+
+```
+
+
+With your [CLI authenticated to your Prefect Cloud workspace](https://docs.prefect.io/cloud/users/api-keys/), run the script to create your deployment:
+
+```
+python managed-execution.py
+
+```
+
+
+Note that this deployment uses flow code stored in a GitHub repository.
+
+### Step 3
+
+Run the deployment from the UI or from the CLI.
+
+That's it! You ran a flow on remote infrastructure without setting up any infrastructure, starting a worker, or needing a cloud provider account.
+
+### Adding dependencies
+
+Prefect can install Python packages in the container that runs your flow at runtime. You can specify these dependencies in the **Pip Packages** field in the UI, or by configuring `job_variables={"pip_packages": ["pandas", "prefect-aws"]}` in your deployment creation like this:
+
+```python
+from prefect import flow
+
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/prefecthq/demo.git",
+        entrypoint="flow.py:my_flow",
+    ).deploy(
+        name="test-managed-flow",
+        work_pool_name="my-managed-pool",
+        job_variables={"pip_packages": ["pandas", "prefect-aws"]}
+    )
+
+```
+
+
+Alternatively, you can create a `requirements.txt` file and reference it in your `prefect.yaml` `pull_step`.
+
+Limitations
+---------------------------------------------
+
+Managed execution requires Prefect 2.14.4 or newer.
+
+All limitations listed below may change without warning during the beta period. We will update this page as we make changes.
+
+### Concurrency & work pools
+
+Free tier accounts are limited to:
+
+* Maximum of 1 concurrent flow run per workspace across all `prefect:managed` pools.
+* Maximum of 1 managed execution work pool per workspace.
+
+Pro tier and above accounts are limited to:
+
+* Maximum of 10 concurrent flow runs per workspace across all `prefect:managed` pools.
+* Maximum of 5 managed execution work pools per workspace.
+
+### Images
+
+At this time, managed execution requires that you run the official Prefect Docker image: `prefecthq/prefect:2-latest`. However, as noted above, you can install Python package dependencies at runtime. If you need to use your own image, we recommend using another type of work pool.
+
+### Code storage
+
+Flow code must be stored in an accessible remote location. This means git-based cloud providers such as GitHub, Bitbucket, or GitLab are supported. Remote block-based storage is also supported, so S3, GCS, and Azure Blob are additional code storage options.
+
+### Resources
+
+Memory is limited to 2GB of RAM, which includes all operations such as dependency installation. Maximum job run time is 24 hours.
+
+Usage limits
+-----------------------------------------------
+
+Free tier accounts are limited to ten compute hours per workspace per month. Pro tier and above accounts are limited to 250 hours per workspace per month. You can view your compute hours quota usage on the **Work Pools** page in the UI.
+
+Next steps
+-------------------------------------------
+
+Read more about creating deployments in the [deployment guide](https://docs.prefect.io/guides/prefect-deploy/).
+
+If you find that you need more control over your infrastructure, such as the ability to run custom Docker images, serverless push work pools might be a good option. Read more [here](https://docs.prefect.io/guides/deployment/push-work-pools/).
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/read--write-data.mdx b/docs/2.19.x/how-to-guides/execution/read--write-data.mdx
new file mode 100644
index 000000000000..b3370e5a5691
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/read--write-data.mdx
@@ -0,0 +1,339 @@
+---
+title: Read and Write Data to and from Cloud Provider Storage
+sidebarTitle: Read and Write Data
+---
+
+Writing data to cloud-based storage and reading data from that storage is a common task in data engineering. In this guide we'll learn how to use Prefect to move data to and from AWS, Azure, and GCP blob storage.
+
+Prerequisites
+-------------------------------------------------
+
+* Prefect [installed](https://docs.prefect.io/getting-started/installation/)
+* Authenticated with [Prefect Cloud](https://docs.prefect.io/cloud/cloud-quickstart/) (or self-hosted [Prefect server](https://docs.prefect.io/guides/host/) instance)
+* A cloud provider account (e.g. [AWS](https://aws.amazon.com/))
+
+Install relevant Prefect integration library
+---------------------------------------------------------------------------------------------------------------
+
+In the CLI, install the Prefect integration library for your cloud provider:
+
+
+[prefect-aws](https://prefecthq.github.io/prefect-aws/) provides blocks for interacting with AWS services.
+
+```
+pip install -U prefect-aws
+
+```
+
+
+[prefect-azure](https://prefecthq.github.io/prefect-azure/) provides blocks for interacting with Azure services.
+
+```
+pip install -U prefect-azure
+
+```
+
+
+[prefect-gcp](https://prefecthq.github.io/prefect-gcp/) provides blocks for interacting with GCP services.
+
+```
+pip install -U prefect-gcp
+
+```
+
+
+
+
+
+Register the block types
+-----------------------------------------------------------------------
+
+Register the new block types with Prefect Cloud (or with your self-hosted Prefect server instance):
+
+
+
+```
+prefect block register -m prefect_aws
+
+```
+
+
+```
+prefect block register -m prefect_azure
+
+```
+
+
+```
+prefect block register -m prefect_gcp
+
+```
+
+
+
+
+
+We should see a message in the CLI that several block types were registered. If we check the UI, we should see the new block types listed.
+
+Create a storage bucket
+---------------------------------------------------------------------
+
+Create a storage bucket in the cloud provider account. Ensure the bucket is publicly accessible or create a user or service account with the appropriate permissions to fetch and write data to the bucket.
+
+Create a credentials block
+---------------------------------------------------------------------------
+
+If the bucket is private, there are several options to authenticate:
+
+1. At deployment runtime, ensure the runtime environment is authenticated.
+2. Create a block with configuration details and reference it when creating the storage block.
+
+If saving credential details in a block, we can use a credentials block specific to the cloud provider or a more generic secret block. We can create [blocks](https://docs.prefect.io/concepts/blocks/) via the UI or Python code. Below we'll use Python code to create a credentials block for our cloud provider.
+
+Credentials safety
+
+Reminder: don't store credential values in public locations such as public git platform repositories. In the examples below we use environment variables to store credential values.
+
+
+
+```python
+import os
+from prefect_aws import AwsCredentials
+
+my_aws_creds = AwsCredentials(
+    aws_access_key_id="123abc",
+    aws_secret_access_key=os.environ.get("MY_AWS_SECRET_ACCESS_KEY"),
+)
+my_aws_creds.save(name="my-aws-creds-block", overwrite=True)
+
+```
+
+
+```python
+import os
+from prefect_azure import AzureBlobStorageCredentials
+
+my_azure_creds = AzureBlobStorageCredentials(
+    connection_string=os.environ.get("MY_AZURE_CONNECTION_STRING"),
+)
+my_azure_creds.save(name="my-azure-creds-block", overwrite=True)
+
+```
+
+
+```python
+import os
+from prefect_gcp import GcpCredentials
+
+my_gcp_creds = GcpCredentials(
+    service_account_info=os.environ.get("GCP_SERVICE_ACCOUNT_KEY_FILE_CONTENTS"),
+)
+my_gcp_creds.save(name="my-gcp-creds-block", overwrite=True)
+
+```
+
+
+
+
+We recommend specifying the service account key file contents as a string, rather than the path to the file, because that file might not be available in your production environments.
+
+
+
+
+Run the code to create the block. We should see a message that the block was created.
+
+Create a storage block
+-------------------------------------------------------------------
+
+Let's create a block for the chosen cloud provider using Python code or the UI. In this example we'll use Python code.
+
+
+
+Note that the `S3Bucket` block is not the same as the `S3` block that ships with Prefect. The `S3Bucket` block we use in this example is part of the `prefect-aws` library and provides additional functionality.
+
+We'll reference the credentials block created above.
+
+```python
+from prefect_aws import AwsCredentials, S3Bucket
+
+aws_creds = AwsCredentials.load("my-aws-creds-block")
+
+s3bucket = S3Bucket(
+    bucket_name="my-bucket-name",
+    credentials=aws_creds
+)
+s3bucket.save(name="my-s3-bucket-block", overwrite=True)
+
+```
+
+
+Note that the `AzureBlobStorageCredentials` block is not the same as the Azure block that ships with Prefect. The `AzureBlobStorageCredentials` block we use in this example is part of the `prefect-azure` library and provides additional functionality.
+
+Azure blob storage doesn't require a separate storage block; the connection string used in the `AzureBlobStorageCredentials` block can encode the information needed.
+
+
+
+Note that the `GcsBucket` block is not the same as the `GCS` block that ships with Prefect. The `GcsBucket` block is part of the `prefect-gcp` library and provides additional functionality. We'll use it here.
+
+We'll reference the credentials block created above.
+
+```python
+from prefect_gcp import GcpCredentials
+from prefect_gcp.cloud_storage import GcsBucket
+
+gcp_creds = GcpCredentials.load("my-gcp-creds-block")
+
+gcsbucket = GcsBucket(
+    bucket="my-bucket-name",
+    gcp_credentials=gcp_creds
+)
+gcsbucket.save(name="my-gcs-bucket-block", overwrite=True)
+
+```
+
+
+
+
+
+Run the code to create the block. We should see a message that the block was created.
+
+Use your new block inside a flow to write data to your cloud provider.
+
+
+
+```python
+from pathlib import Path
+from prefect import flow
+from prefect_aws.s3 import S3Bucket
+
+@flow()
+def upload_to_s3():
+    """Flow function to upload data"""
+    path = Path("my_path_to/my_file.parquet")
+    aws_block = S3Bucket.load("my-s3-bucket-block")
+    aws_block.upload_from_path(from_path=path, to_path=path)
+
+if __name__ == "__main__":
+    upload_to_s3()
+
+```
+
+
+```python
+from prefect import flow
+from prefect_azure import AzureBlobStorageCredentials
+from prefect_azure.blob_storage import blob_storage_upload
+
+@flow
+def upload_to_azure():
+    """Flow function to upload data"""
+    blob_storage_credentials = AzureBlobStorageCredentials.load(
+        name="my-azure-creds-block"
+    )
+
+    with open("my_path_to/my_file.parquet", "rb") as f:
+        blob_storage_upload(
+            data=f.read(),
+            container="my_container",
+            blob="my_path_to/my_file.parquet",
+            blob_storage_credentials=blob_storage_credentials,
+        )
+
+if __name__ == "__main__":
+    upload_to_azure()
+
+```
+
+
+```python
+from pathlib import Path
+from prefect import flow
+from prefect_gcp.cloud_storage import GcsBucket
+
+@flow()
+def upload_to_gcs():
+    """Flow function to upload data"""
+    path = Path("my_path_to/my_file.parquet")
+    gcs_block = GcsBucket.load("my-gcs-bucket-block")
+    gcs_block.upload_from_path(from_path=path, to_path=path)
+
+if __name__ == "__main__":
+    upload_to_gcs()
+
+```
+
+
+
+
+Read data
+-----------------------------------------
+
+Use your block to read data from your cloud provider inside a flow.
+
+
+
+```python
+from prefect import flow
+from prefect_aws import S3Bucket
+
+@flow
+def download_from_s3():
+    """Flow function to download data"""
+    s3_block = S3Bucket.load("my-s3-bucket-block")
+    s3_block.get_directory(
+        from_path="my_path_to/my_file.parquet",
+        local_path="my_path_to/my_file.parquet"
+    )
+
+if __name__ == "__main__":
+    download_from_s3()
+
+```
+
+
+```python
+from prefect import flow
+from prefect_azure import AzureBlobStorageCredentials
+from prefect_azure.blob_storage import blob_storage_download
+
+@flow
+def download_from_azure():
+    """Flow function to download data"""
+    blob_storage_credentials = AzureBlobStorageCredentials.load(
+        name="my-azure-creds-block"
+    )
+    blob_storage_download(
+        blob="my_path_to/my_file.parquet",
+        container="my_container",
+        blob_storage_credentials=blob_storage_credentials,
+    )
+
+if __name__ == "__main__":
+    download_from_azure()
+
+```
+
+
+
+```python
+from prefect import flow
+from prefect_gcp.cloud_storage import GcsBucket
+
+@flow
+def download_from_gcs():
+    """Flow function to download data"""
+    gcs_block = GcsBucket.load("my-gcs-bucket-block")
+    gcs_block.get_directory(
+        from_path="my_path_to/my_file.parquet",
+        local_path="my_path_to/my_file.parquet"
+    )
+
+if __name__ == "__main__":
+    download_from_gcs()
+
+```
+
+
+
+
+In this guide we've seen how to use Prefect to read data from and write data to cloud providers!
+
+Next steps
+-------------------------------------------
+
+Check out the [`prefect-aws`](https://prefecthq.github.io/prefect-aws/), [`prefect-azure`](https://prefecthq.github.io/prefect-azure/), and [`prefect-gcp`](https://prefecthq.github.io/prefect-gcp/) docs to see additional methods for interacting with cloud storage providers. Each library also contains blocks for interacting with other cloud-provider services.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/shell-commands.mdx b/docs/2.19.x/how-to-guides/execution/shell-commands.mdx
new file mode 100644
index 000000000000..326b89d77286
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/shell-commands.mdx
@@ -0,0 +1,75 @@
+---
+title: Orchestrating Shell Commands with Prefect
+sidebarTitle: Shell Commands
+---
+
+Harness the power of the Prefect CLI to execute and schedule shell commands as Prefect flows. This guide shows how to use the `watch` and `serve` commands, showcasing the CLI's versatility for automation tasks.
+
+Here's what you'll learn:
+
+* Running a shell command as a Prefect flow on-demand with `watch`.
+* Scheduling a shell command as a recurring Prefect flow using `serve`.
+* The benefits of embedding these commands into your automation workflows.
+
+Prerequisites
+-------------------------------------------------
+
+Before you begin, ensure you have:
+
+* A basic understanding of Prefect flows. Start with the [Getting Started](https://docs.prefect.io/getting-started/quickstart/) guide if you're new.
+* A recent version of Prefect installed in your command line environment. Follow the instructions in the [docs](https://docs.prefect.io/getting-started/installation/) if you have any issues.
+
+The `watch` command
+-----------------------------------------------------------
+
+The `watch` command wraps any shell command in a Prefect flow for instant execution, ideal for quick tasks or integrating shell scripts into your workflows.
+
+### Example usage
+
+Imagine you want to fetch the current weather in Chicago using the `curl` command.
+The following Prefect CLI command does just that:
+
+```
+prefect shell watch "curl http://wttr.in/Chicago?format=3"
+
+```
+
+
+This command makes a request to `wttr.in`, a console-oriented weather service, and prints the weather conditions for Chicago.
+
+### Benefits of `watch`
+
+* **Immediate feedback:** Execute shell commands within the Prefect framework for immediate results.
+* **Easy integration:** Seamlessly blend external scripts or data fetching into your data workflows.
+* **Visibility and logging:** Leverage Prefect's logging to track the execution and output of your shell tasks.
+
+Deploying with `serve`
+-----------------------------------------------------------------
+
+When you need to run shell commands on a schedule, the `serve` command creates a Prefect [deployment](https://docs.prefect.io/concepts/deployments/) for regular execution. This is an extremely quick way to create a deployment that is served by Prefect.
+
+### Example usage
+
+To set up a daily weather report for Chicago at 9 AM, you can use the `serve` command as follows:
+
+```
+prefect shell serve "curl http://wttr.in/Chicago?format=3" --flow-name "Daily Chicago Weather Report" --cron-schedule "0 9 * * *" --deployment-name "Chicago Weather"
+
+```
+
+
+This command schedules a Prefect flow to fetch Chicago's weather conditions daily, providing consistent updates without manual intervention. You can also trigger a run of your new deployment manually at any time from the UI or the CLI.
+
+To shut down your server and pause your scheduled runs, hit `ctrl` + `c` in the CLI.
+
+### Benefits of `serve`
+
+* **Automated scheduling:** Schedule shell commands to run automatically, ensuring critical updates are generated and available on time.
+* **Centralized workflow management:** Manage and monitor your scheduled shell commands inside Prefect for a unified workflow overview.
+* **Configurable execution:** Tailor execution frequency, [concurrency limits](https://docs.prefect.io/guides/global-concurrency-limits/), and other parameters to suit your project's needs and resources.
+
+Next steps
+-------------------------------------------
+
+With the `watch` and `serve` commands at your disposal, you're ready to incorporate shell command automation into your Prefect workflows. You can start with straightforward tasks like observing cron jobs and expand to more complex automation scenarios to enhance your workflows' efficiency and capabilities.
+
+Check out the [tutorial](https://docs.prefect.io/tutorial/) and explore other Prefect docs to learn how to gain more observability and orchestration capabilities in your workflows.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/execution/state-change-hooks.mdx b/docs/2.19.x/how-to-guides/execution/state-change-hooks.mdx
new file mode 100644
index 000000000000..81660e34d394
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/state-change-hooks.mdx
@@ -0,0 +1,100 @@
+---
+title: State Change Hooks
+---
+[State change hooks](https://docs.prefect.io/concepts/states/#state-change-hooks) execute code in response to changes in flow or task run states, enabling you to define actions for specific state transitions in a workflow. This guide provides examples of real-world use cases.
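+Before diving into the examples, here's a minimal sketch of the hook interface: every flow run state change hook receives the flow object, the flow run, and the new state. The hook and flow names below are illustrative:
+
+```python
+from prefect import flow
+
+def log_transition(flow, flow_run, state):
+    # every flow run state change hook receives these three arguments
+    print(f"{flow_run.name} entered state {state.name}")
+
+@flow(on_completion=[log_transition])
+def tiny_flow():
+    return "done"
+
+if __name__ == "__main__":
+    tiny_flow()
+```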
+
+
+Example use cases
+---------------------------------------------------------
+
+### Send a notification when a flow run fails
+
+State change hooks enable you to customize messages sent when tasks transition between states, such as sending notifications containing sensitive information when tasks enter a `Failed` state. Let's run a client-side hook upon a flow run entering a `Failed` state.
+
+```python
+from prefect import flow
+from prefect.blocks.core import Block
+from prefect.settings import PREFECT_API_URL
+
+def notify_slack(flow, flow_run, state):
+    slack_webhook_block = Block.load(
+        "slack-webhook/my-slack-webhook"
+    )
+
+    slack_webhook_block.notify(
+        (
+            f"Your job {flow_run.name} entered {state.name} "
+            f"with message:\n\n"
+            f"See {PREFECT_API_URL.value()}/flow-runs/flow-run/{flow_run.id}\n\n"
+            f"Tags: {flow_run.tags}\n\n"
+            f"Scheduled start: {flow_run.expected_start_time}"
+        )
+    )
+
+@flow(on_failure=[notify_slack], retries=1)
+def failing_flow():
+    raise ValueError("oops!")
+
+if __name__ == "__main__":
+    failing_flow()
+
+```
+
+
+Note that because we've configured retries in this example, the `on_failure` hook will not run until all `retries` have completed, when the flow run enters a `Failed` state.
+
+### Delete a Cloud Run job when a flow run crashes
+
+State change hooks can aid in managing infrastructure cleanup in scenarios where tasks spin up individual infrastructure resources independently of Prefect. When a flow run crashes, tasks may exit abruptly, resulting in the potential omission of cleanup logic within the tasks. State change hooks can be used to ensure infrastructure is properly cleaned up even when a flow run enters a `Crashed` state!
+
+Let's create a hook that deletes a Cloud Run job if the flow run crashes.
+
+```python
+import os
+from prefect import flow, task
+from prefect.blocks.system import String
+from prefect.client import get_client
+import prefect.runtime
+
+async def delete_cloud_run_job(flow, flow_run, state):
+    """Flow run state change hook that deletes a Cloud Run Job if
+    the flow run crashes."""
+
+    # retrieve Cloud Run job name
+    cloud_run_job_name = await String.load(
+        name="crashing-flow-cloud-run-job"
+    )
+
+    # delete Cloud Run job
+    delete_job_command = (
+        f"yes | gcloud beta run jobs delete {cloud_run_job_name.value} "
+        "--region us-central1"
+    )
+    os.system(delete_job_command)
+
+    # clean up the Cloud Run job string block as well
+    async with get_client() as client:
+        block_document = await client.read_block_document_by_name(
+            "crashing-flow-cloud-run-job", block_type_slug="string"
+        )
+        await client.delete_block_document(block_document.id)
+
+@task
+def my_task_that_crashes():
+    raise SystemExit("Crashing on purpose!")
+
+@flow(on_crashed=[delete_cloud_run_job])
+def crashing_flow():
+    """Saves the flow run name (i.e. the Cloud Run job name) as a
+    String block, then executes a task that ends up crashing."""
+    flow_run_name = prefect.runtime.flow_run.name
+    cloud_run_job_name = String(value=flow_run_name)
+    cloud_run_job_name.save(
+        name="crashing-flow-cloud-run-job", overwrite=True
+    )
+
+    my_task_that_crashes()
+
+if __name__ == "__main__":
+    crashing_flow()
+
+```
diff --git a/docs/2.19.x/how-to-guides/execution/troubleshooting.mdx b/docs/2.19.x/how-to-guides/execution/troubleshooting.mdx
new file mode 100644
index 000000000000..05cd98208bdc
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/execution/troubleshooting.mdx
@@ -0,0 +1,169 @@
+---
+title: Troubleshooting
+---
+
+Don't Panic! If you experience an error with Prefect, there are many paths to understanding and resolving it.
+The first troubleshooting step is confirming that you are running the latest version of Prefect. If you are not, be sure to [upgrade](#upgrade) to the latest version, since the issue may have already been fixed. Beyond that, there are several categories of errors:
+
+* The issue may be in your flow code, in which case you should carefully read the [logs](#logs).
+* The issue could be with how you are authenticated, and whether or not you are connected to [Cloud](#cloud).
+* The issue might have to do with how your code is [executed](#execution).
+
+Upgrade
+-------------------------------------
+
+Prefect is constantly evolving, adding new features and fixing bugs. Chances are that a patch has already been identified and released. Search existing [issues](https://github.com/PrefectHQ/prefect/issues) for similar reports and check out the [Release Notes](https://github.com/PrefectHQ/prefect/blob/main/RELEASE-NOTES.md). Upgrade to the newest version with the following command:
+
+```
+pip install --upgrade prefect
+
+```
+
+
+Different components may use different versions of Prefect:
+
+* **Cloud** will generally always be the newest version. Cloud is continuously deployed by the Prefect team. When using a self-hosted server, you can control this version.
+* **Workers and agents** typically don't change versions frequently, and are usually whatever the latest version was at the time of creation. Workers and agents provision infrastructure for flow runs, so upgrading them may help with infrastructure problems.
+* **Flows** could use a different version than the worker or agent that created them, especially when running in different environments. Suppose your worker and flow both use the latest official Docker image, but your worker was created a month ago. Your worker will often be on an older version than your flow.
+
+
+**Integration Versions**
+
+Keep in mind that [integrations](https://docs.prefect.io/integrations/) are versioned and released independently of the core Prefect library. They should be upgraded simultaneously with the core library, using the same method.
+
+
+Logs
+-------------------------------
+
+In many cases, there will be an informative stack trace in Prefect's [logs](https://docs.prefect.io/concepts/logs/). **Read it carefully**, locate the source of the error, and try to identify the cause.
+
+There are two types of logs:
+
+* **Flow and task logs** are always scoped to a flow. They are sent to Prefect and are viewable in the UI.
+* **Worker and agent logs** are not scoped to a flow and may have more information on what happened before the flow started. These logs are generally only available where the worker or agent is running.
+
+If your flow and task logs are empty, there may have been an infrastructure issue that prevented your flow from starting. Check your worker logs for more details.
+
+If there is no clear indication of what went wrong, try updating the logging level from the default `INFO` level to the `DEBUG` level. [Settings](https://docs.prefect.io/api-ref/prefect/settings/) such as the logging level are propagated from the worker environment to the flow run environment and can be set via environment variables or the `prefect config set` CLI:
+
+```
+# Using the CLI
+prefect config set PREFECT_LOGGING_LEVEL=DEBUG
+
+# Using environment variables
+export PREFECT_LOGGING_LEVEL=DEBUG
+
+```
+
+
+The `DEBUG` logging level produces a high volume of logs, so consider setting it back to `INFO` once any issues are resolved.
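+If the logs suggest a connection problem rather than a code problem, it can also help to confirm that your environment can reach the Prefect API at all before digging further. A minimal sketch using the built-in client, assuming your profile's API URL (and API key, for Cloud) are already configured:
+
+```python
+import asyncio
+
+from prefect import get_client
+
+async def check_api():
+    async with get_client() as client:
+        # a simple GET against the API; raises if the server is unreachable
+        response = await client.hello()
+        print(response.json())
+
+asyncio.run(check_api())
+```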
+
+Cloud
+---------------------------------
+
+When using Prefect Cloud, there are the additional concerns of authentication and authorization. The Prefect API authenticates users and service accounts - collectively known as actors - with API keys. Missing, incorrect, or expired API keys will result in a 401 response with detail `Invalid authentication credentials`. Use the following command to check your authentication, replacing `$PREFECT_API_KEY` with your API key:
+
+```
+curl -s -H "Authorization: Bearer $PREFECT_API_KEY" "https://api.prefect.cloud/api/me/"
+
+```
+
+
+**Users vs Service Accounts**
+
+[Service accounts](https://docs.prefect.io/cloud/users/service-accounts/) - sometimes referred to as bots - represent non-human actors that interact with Prefect such as workers and CI/CD systems. Each human that interacts with Prefect should be represented as a user. User API keys start with `pnu_` and service account API keys start with `pnb_`.
+
+
+If that request succeeds, next check your authorization. Actors can be members of [workspaces](https://docs.prefect.io/cloud/workspaces/). If an actor attempts an action in a workspace they are not a member of, the API will return a 404 response. Use the following command to check your actor's workspace memberships:
+
+```
+curl -s -H "Authorization: Bearer $PREFECT_API_KEY" "https://api.prefect.cloud/api/me/workspaces"
+
+```
+
+
+**Formatting JSON**
+
+Python comes with a helpful [tool](https://docs.python.org/3/library/json.html#module-json.tool) for formatting JSON. Append the following to the end of the command above to make the output more readable: `| python -m json.tool`
+
+
+Make sure your actor is a member of the workspace you are working in. Within a workspace, an actor has a [role](https://docs.prefect.io/cloud/users/roles/) which grants them certain permissions. Insufficient permissions will result in an error. For example, starting an agent or worker with the **Viewer** role will result in errors.
+
+Execution
+-----------------------------------------
+
+Prefect flows can be executed locally by the user, or remotely by a worker or agent. Local execution generally means that you - the user - run your flow directly with a command like `python flow.py`. Remote execution generally means that a worker runs your flow via a [deployment](https://docs.prefect.io/concepts/deployments/), optionally on different infrastructure.
+
+With remote execution, the creation of your flow run happens separately from its execution. Flow runs are assigned to a work pool and a work queue. For flow runs to execute, a worker must be subscribed to the work pool and work queue, otherwise the flow runs will go from `Scheduled` to `Late`. Ensure that your work pool and work queue have a subscribed worker.
+
+Local and remote execution can also differ in their treatment of relative imports. If switching from local to remote execution results in local import errors, try replicating the behavior by executing the flow locally with the `-m` flag (i.e. `python -m flow` instead of `python flow.py`). Read more about `-m` [here](https://stackoverflow.com/a/62923810).
+
+API tests return an unexpected 307 Redirected
+-----------------------------------------------------------------------------------------------------------------
+
+**Summary:** Requests require a trailing `/` in the request URL.
+
+If you write a test that does not include a trailing `/` when making a request to a specific endpoint:
+
+```python
+async def test_example(client):
+    response = await client.post("/my_route")
+    assert response.status_code == 201
+
+```
+
+
+You'll see a failure like:
+
+```
+E assert 307 == 201
+E + where 307 = .status_code
+
+```
+
+
+To resolve this, include the trailing `/`:
+
+```python
+async def test_example(client):
+    response = await client.post("/my_route/")
+    assert response.status_code == 201
+
+```
+
+
+Note: requests to nested URLs may exhibit the _opposite_ behavior and require no trailing slash:
+
+```python
+async def test_nested_example(client):
+    response = await client.post("/my_route/filter/")
+    assert response.status_code == 307
+
+    response = await client.post("/my_route/filter")
+    assert response.status_code == 200
+
+```
+
+
+**Reference:** "HTTPX disabled redirect following by default" in [`0.20.0`](https://github.com/encode/httpx/blob/master/CHANGELOG.md#0200-13th-october-2021).
+
+`pytest.PytestUnraisableExceptionWarning` or `ResourceWarning`
+----------------------------------------------------------------------------------------------------------------------------------------------
+
+As you're working with one of the `FlowRunner` implementations, you may get an error like this one:
+
+```
+E pytest.PytestUnraisableExceptionWarning: Exception ignored in:
+E
+E Traceback (most recent call last):
+E   File ".../pytest_asyncio/plugin.py", line 306, in setup
+E     res = await func(**_add_kwargs(func, kwargs, event_loop, request))
+E ResourceWarning: unclosed
+
+.../_pytest/unraisableexception.py:78: PytestUnraisableExceptionWarning
+
+```
+
+
+This error is saying that your test suite (or the `prefect` library code) opened a connection to something (like a Docker daemon or a Kubernetes cluster) and didn't close it.
+
+It may help to re-run the specific test with `PYTHONTRACEMALLOC=25 pytest ...` so that Python can display more of the stack trace where the connection was opened.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/custom-workers.mdx b/docs/2.19.x/how-to-guides/work-pools/custom-workers.mdx
new file mode 100644
index 000000000000..e46e0ea3f8d8
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/custom-workers.mdx
@@ -0,0 +1,466 @@
+---
+sidebarTitle: Custom Workers
+title: Developing a New Worker Type
+
+---
+
+**Advanced Topic**
+
+This tutorial is for users who want to extend the Prefect framework; completing it successfully will require deep knowledge of Prefect concepts. For standard use cases, we recommend using one of the [available workers](https://docs.prefect.io/concepts/work-pools/#worker-types) instead.
+
+
+Prefect workers are responsible for setting up execution infrastructure and starting flow runs on that infrastructure.
+
+A list of available workers can be found [here](https://docs.prefect.io/concepts/work-pools/#worker-types). What if you want to execute your flow runs on infrastructure that doesn't have an available worker type? This tutorial will walk you through creating a custom worker that can run your flows on your chosen infrastructure.
+
+Worker configuration
+---------------------------------------------------------------
+
+When setting up an execution environment for a flow run, a worker receives configuration for the infrastructure it is designed to work with. Examples of configuration values include memory allocation, CPU allocation, credentials, and image name.
+The worker then uses this configuration to create the execution environment and start the flow run.
+
+**How are the configuration values populated?**
+
+
+The work pool that a worker polls for flow runs has a [base job template](https://docs.prefect.io/concepts/work-pools/#base-job-template) associated with it. The template is the contract for how configuration values populate for each flow run.
+
+The keys in the `job_configuration` section of this base job template match the worker's configuration class attributes. The values in the `job_configuration` section of the base job template are used to populate the attributes of the worker's configuration class.
+
+The work pool creator gets to decide how they want to populate the values in the `job_configuration` section of the base job template. The values can be hard-coded, templated using placeholders, or a mix of these two approaches. Because you, as the worker developer, don't know how the work pool creator will populate the values, you should set sensible defaults for your configuration class attributes as a matter of best practice.
+
+
+### Implementing a `BaseJobConfiguration` subclass
+
+A worker developer defines their worker's configuration with a class extending [`BaseJobConfiguration`](https://docs.prefect.io/api-ref/prefect/workers/base/#prefect.workers.base.BaseJobConfiguration).
+
+`BaseJobConfiguration` has attributes that are common to all workers:
+
+
+|Attribute |Description |
+|-----------|-------------------------------------------------------------------------------|
+|`name` |The name to assign to the created execution environment. |
+|`env` |Environment variables to set in the created execution environment. |
+|`labels` |The labels assigned to the created execution environment for metadata purposes.|
+|`command` |The command to use when starting a flow run. |
+
+
+Prefect sets values for each attribute before giving the configuration to the worker. If you want to customize the values of these attributes, use the [`prepare_for_flow_run`](https://docs.prefect.io/api-ref/prefect/workers/base/#prefect.workers.base.BaseJobConfiguration.prepare_for_flow_run) method.
+
+Here's an example `prepare_for_flow_run` method that adds a label to the execution environment:
+
+```python
+def prepare_for_flow_run(
+    self, flow_run, deployment=None, flow=None,
+):
+    super().prepare_for_flow_run(flow_run, deployment, flow)
+    self.labels.append("my-custom-label")
+
+```
+
+
+A worker configuration class is a [Pydantic model](https://docs.pydantic.dev/usage/models/), so you can add additional attributes to your configuration class as Pydantic fields. For example, if you want to allow memory and CPU requests for your worker, you can do so like this:
+
+```python
+from pydantic import Field
+from prefect.workers.base import BaseJobConfiguration
+
+class MyWorkerConfiguration(BaseJobConfiguration):
+    memory: int = Field(
+        default=1024,
+        description="Memory allocation for the execution environment."
+    )
+    cpu: int = Field(
+        default=500,
+        description="CPU allocation for the execution environment."
+    )
+
+```
+
+
+This configuration class will populate the `job_configuration` section of the resulting base job template.
+
+For this example, the base job template would look like this:
+
+```
+job_configuration:
+  name: "{{ name }}"
+  env: "{{ env }}"
+  labels: "{{ labels }}"
+  command: "{{ command }}"
+  memory: "{{ memory }}"
+  cpu: "{{ cpu }}"
+variables:
+  type: object
+  properties:
+    name:
+      title: Name
+      description: Name given to infrastructure created by a worker.
+      type: string
+    env:
+      title: Environment Variables
+      description: Environment variables to set when starting a flow run.
+      type: object
+      additionalProperties:
+        type: string
+    labels:
+      title: Labels
+      description: Labels applied to infrastructure created by a worker.
+      type: object
+      additionalProperties:
+        type: string
+    command:
+      title: Command
+      description: The command to use when starting a flow run. In most cases,
+        this should be left blank and the command will be automatically generated
+        by the worker.
+      type: string
+    memory:
+      title: Memory
+      description: Memory allocation for the execution environment.
+      type: integer
+      default: 1024
+    cpu:
+      title: CPU
+      description: CPU allocation for the execution environment.
+      type: integer
+      default: 500
+
+```
+
+
+This base job template defines what values can be provided by deployment creators on a per-deployment basis and how those provided values will be translated into the configuration values that the worker will use to create the execution environment.
+
+Notice that each attribute for the class was added in the `job_configuration` section with placeholders whose name matches the attribute name. The `variables` section was also populated with the OpenAPI schema for each attribute. If a configuration class is used without explicitly declaring any template variables, the template variables will be inferred from the configuration class attributes.
+
+### Customizing Configuration Attribute Templates
+
+You can customize the template for each attribute for situations where the configuration values should use more sophisticated templating. For example, if you want to add units for the `memory` attribute, you can do so like this:
+
+```python
+from pydantic import Field
+from prefect.workers.base import BaseJobConfiguration
+
+class MyWorkerConfiguration(BaseJobConfiguration):
+    memory: str = Field(
+        default="1024Mi",
+        description="Memory allocation for the execution environment.",
+        template="{{ memory_request }}Mi"
+    )
+    cpu: str = Field(
+        default="500m",
+        description="CPU allocation for the execution environment.",
+        template="{{ cpu_request }}m"
+    )
+
+```
+
+
+Notice that we changed the type of each attribute to `str` to accommodate the units, and we added a new `template` attribute to each attribute. The `template` attribute is used to populate the `job_configuration` section of the resulting base job template.
+
+For this example, the `job_configuration` section of the resulting base job template would look like this:
+
+```
+job_configuration:
+  name: "{{ name }}"
+  env: "{{ env }}"
+  labels: "{{ labels }}"
+  command: "{{ command }}"
+  memory: "{{ memory_request }}Mi"
+  cpu: "{{ cpu_request }}m"
+
+```
+
+
+Note that to use custom templates, you will need to declare the template variables used in the template because the names of those variables can no longer be inferred from the configuration class attributes. We will cover how to declare the default variable schema in the [Worker Template Variables](#worker-template-variables) section.
+
+### Rules for template variable interpolation
+
+When defining a job configuration model, it's useful to understand how template variables are interpolated into the job configuration. The templating engine follows a few simple rules:
+
+1. If a template variable is the only value for a key in the `job_configuration` section, the key's value will be replaced with the value of the template variable.
+2. If a template variable is part of a string (i.e., there is text before or after the template variable), the value of the template variable will be interpolated into the string.
+3. If a template variable is the only value for a key in the `job_configuration` section and no value is provided for the template variable, the key will be removed from the `job_configuration` section.
+
+These rules allow worker developers and work pool maintainers to define template variables that can be complex types like dictionaries and lists. These rules also mean that worker developers should give reasonable default values to job configuration fields whenever possible, because values are not guaranteed to be provided if template variables are unset.
+
+### Template variable usage strategies
+
+Template variables define the interface that deployment creators interact with to configure the execution environments of their deployments. The complexity of this interface can be controlled via the template variables that are defined for a base job template. This control allows work pool maintainers to find the balance of flexibility and simplicity appropriate for their organization.
+
+There are two patterns that are represented in current worker implementations:
+
+#### Pass-through
+
+In the pass-through pattern, template variables are passed through to the job configuration with little change. This pattern exposes complete control to deployment creators but also requires them to understand the details of the execution environment.
+
+This pattern is useful when the execution environment is simple, and the deployment creators are expected to have high technical knowledge.
+
+The [Docker worker](https://prefecthq.github.io/prefect-docker/worker/) is an example of a worker that uses this pattern.
+
+#### Infrastructure as code templating
+
+Depending on the infrastructure they interact with, workers can sometimes employ a declarative infrastructure syntax (i.e., infrastructure as code) to create execution environments (e.g., a Kubernetes manifest or an ECS task definition).
+
+In the IaC pattern, it's often useful to use template variables to template portions of the declarative syntax, which can then be rendered into its final form.
+
+This approach allows work pool creators to provide a simpler interface to deployment creators while also controlling which portions of infrastructure are configurable by deployment creators.
+
+The [Kubernetes worker](https://prefecthq.github.io/prefect-kubernetes/worker/) is an example of a worker that uses this pattern.
+
+### Configuring credentials
+
+When executing flow runs within cloud services, workers will often need credentials to authenticate with those services. For example, a worker that executes flow runs in AWS Fargate will need AWS credentials. As a worker developer, you can use blocks to accept credentials configuration from the user.
For example, if you want to allow the user to configure AWS credentials, you can do so like this:

```python
from typing import Optional

from pydantic import Field
from prefect_aws import AwsCredentials

from prefect.workers.base import BaseJobConfiguration

class MyWorkerConfiguration(BaseJobConfiguration):
    aws_credentials: Optional[AwsCredentials] = Field(
        default=None,
        description="AWS credentials to use when creating AWS resources."
    )
```

Users can create and assign a block to the `aws_credentials` attribute in the UI, and the worker will use these credentials when interacting with AWS resources.

Worker template variables
-------------------------------------------------------------------------

Providing template variables for a base job template defines the fields that deployment creators can override per deployment. The work pool creator ultimately defines the template variables for a base job template, but the worker developer is able to define default template variables for the worker to make it easier to use.

Default template variables for a worker are defined by implementing the `BaseVariables` class. Like the `BaseJobConfiguration` class, the `BaseVariables` class has attributes that are common to all workers:


|Attribute |Description                                                                     |
|----------|--------------------------------------------------------------------------------|
|`name`    |The name to assign to the created execution environment.                        |
|`env`     |Environment variables to set in the created execution environment.              |
|`labels`  |The labels assigned to the created execution environment for metadata purposes. |
|`command` |The command to use when starting a flow run.                                    |


Additional attributes can be added to the `BaseVariables` class to define additional template variables. For example, if you want to allow deployment creators to set memory and CPU requests for your worker, you can do so like this:

```python
from pydantic import Field
from prefect.workers.base import BaseVariables

class MyWorkerTemplateVariables(BaseVariables):
    memory_request: int = Field(
        default=1024,
        description="Memory allocation for the execution environment."
    )
    cpu_request: int = Field(
        default=500,
        description="CPU allocation for the execution environment."
    )
```

When `MyWorkerTemplateVariables` is used in conjunction with `MyWorkerConfiguration` from the [Customizing Configuration Attribute Templates](#customizing-configuration-attribute-templates) section, the resulting base job template will look like this:

```
job_configuration:
  name: "{{ name }}"
  env: "{{ env }}"
  labels: "{{ labels }}"
  command: "{{ command }}"
  memory: "{{ memory_request }}Mi"
  cpu: "{{ cpu_request }}m"
variables:
  type: object
  properties:
    name:
      title: Name
      description: Name given to infrastructure created by a worker.
      type: string
    env:
      title: Environment Variables
      description: Environment variables to set when starting a flow run.
      type: object
      additionalProperties:
        type: string
    labels:
      title: Labels
      description: Labels applied to infrastructure created by a worker.
      type: object
      additionalProperties:
        type: string
    command:
      title: Command
      description: The command to use when starting a flow run. In most cases,
        this should be left blank and the command will be automatically generated
        by the worker.
      type: string
    memory_request:
      title: Memory Request
      description: Memory allocation for the execution environment.
      type: integer
      default: 1024
    cpu_request:
      title: CPU Request
      description: CPU allocation for the execution environment.
      type: integer
      default: 500
```

Note that template variable classes are never used directly. Instead, they are used to generate a schema that is used to populate the `variables` section of a base job template and to validate the template variables provided by the user.

We don't recommend using template variable classes within your worker implementation for validation purposes, because the work pool creator ultimately defines the template variables. The configuration class should handle any necessary run-time validation.

Worker implementation
-----------------------------------------------------------------

Workers set up execution environments using the provided configuration. Workers also observe the execution environment as the flow run executes and report any crashes to the Prefect API.

### Attributes

To implement a worker, you must subclass the `BaseWorker` class and provide the following attributes:


|Attribute                    |Description                                  |Required|
|-----------------------------|---------------------------------------------|--------|
|`type`                       |The type of the worker.                      |Yes     |
|`job_configuration`          |The configuration class for the worker.      |Yes     |
|`job_configuration_variables`|The template variables class for the worker. |No      |
|`_documentation_url`         |Link to documentation for the worker.        |No      |
|`_logo_url`                  |Link to a logo for the worker.               |No      |
|`_description`               |A description of the worker.                 |No      |


### Methods

#### `run`

In addition to the attributes above, you must also implement a `run` method. The `run` method is called for each flow run the worker receives for execution from the work pool.

The `run` method has the following signature:

```python
async def run(
    self,
    flow_run: FlowRun,
    configuration: BaseJobConfiguration,
    task_status: Optional[anyio.abc.TaskStatus] = None,
) -> BaseWorkerResult:
    ...
```

The `run` method is passed the flow run to execute, the execution environment configuration for the flow run, and a task status object that allows the worker to track whether the flow run was submitted successfully.

The `run` method must return a `BaseWorkerResult` object containing information about the flow run execution. In most cases, you can subclass `BaseWorkerResult` with no modifications, like so:

```python
from prefect.workers.base import BaseWorkerResult

class MyWorkerResult(BaseWorkerResult):
    """Result returned by the MyWorker."""
```

If you would like to return more information about a flow run, you can add additional attributes to your `BaseWorkerResult` subclass.

#### `kill_infrastructure`

Workers must implement a `kill_infrastructure` method to support flow run cancellation. The `kill_infrastructure` method is called when a flow run is canceled and is passed an identifier for the infrastructure to tear down and the execution environment configuration for the flow run.

The `infrastructure_pid` passed to the `kill_infrastructure` method is the same identifier used to mark a flow run execution as started in the `run` method. The `infrastructure_pid` must be a string, but it can take on any format you choose.

The `infrastructure_pid` should contain enough information to uniquely identify the infrastructure created for a flow run when used with the `job_configuration` passed to the `kill_infrastructure` method. Examples of useful information include the cluster name, the hostname, the process ID, and the container ID.
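For example, a worker targeting a clustered service might encode several of these identifiers into a single string. Below is a minimal, hypothetical sketch of that approach; the specific fields (cluster, namespace, job name) are illustrative, not a required format:

```python
from typing import Tuple


def compose_infrastructure_pid(cluster: str, namespace: str, job_name: str) -> str:
    """Combine the identifiers needed for teardown into one PID string."""
    return f"{cluster}:{namespace}:{job_name}"


def parse_infrastructure_pid(infrastructure_pid: str) -> Tuple[str, str, str]:
    """Recover the identifiers from a PID string during cancellation."""
    cluster, namespace, job_name = infrastructure_pid.split(":")
    return cluster, namespace, job_name
```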
If a worker cannot tear down the infrastructure created for a flow run, the `kill_infrastructure` method should raise an `InfrastructureNotFound` or `InfrastructureNotAvailable` exception.

### Worker implementation example

Below is an example of a worker implementation. This example is not intended to be a complete implementation, but to illustrate the aforementioned concepts.

```python
from typing import Optional

import anyio.abc
from pydantic import Field

from prefect.client.schemas import FlowRun
from prefect.workers.base import BaseWorker, BaseWorkerResult, BaseJobConfiguration, BaseVariables

class MyWorkerConfiguration(BaseJobConfiguration):
    memory: str = Field(
        default="1024Mi",
        description="Memory allocation for the execution environment.",
        template="{{ memory_request }}Mi"
    )
    cpu: str = Field(
        default="500m",
        description="CPU allocation for the execution environment.",
        template="{{ cpu_request }}m"
    )

class MyWorkerTemplateVariables(BaseVariables):
    memory_request: int = Field(
        default=1024,
        description="Memory allocation for the execution environment."
    )
    cpu_request: int = Field(
        default=500,
        description="CPU allocation for the execution environment."
    )

class MyWorkerResult(BaseWorkerResult):
    """Result returned by the MyWorker."""

class MyWorker(BaseWorker):
    type = "my-worker"
    job_configuration = MyWorkerConfiguration
    job_configuration_variables = MyWorkerTemplateVariables
    _documentation_url = "https://example.com/docs"
    _logo_url = "https://example.com/logo"
    _description = "My worker description."

    async def run(
        self,
        flow_run: FlowRun,
        configuration: BaseJobConfiguration,
        task_status: Optional[anyio.abc.TaskStatus] = None,
    ) -> BaseWorkerResult:
        # Create the execution environment and start execution
        job = await self._create_and_start_job(configuration)

        if task_status:
            # Use a unique ID to mark the run as started. This ID is later used to tear down infrastructure
            # if the flow run is cancelled.
            task_status.started(job.id)

        # Monitor the execution
        job_status = await self._watch_job(job, configuration)

        exit_code = job_status.exit_code if job_status else -1  # Get result of execution for reporting
        return MyWorkerResult(
            status_code=exit_code,
            identifier=job.id,
        )

    async def kill_infrastructure(self, infrastructure_pid: str, configuration: BaseJobConfiguration) -> None:
        # Tear down the execution environment
        await self._kill_job(infrastructure_pid, configuration)
```

Most of the execution logic is omitted from the example above, but it shows that the typical order of operations in the `run` method is:

1. Create the execution environment and start the flow run execution
2. Mark the flow run as started via the passed `task_status` object
3. Monitor the execution
4. Get the execution's final status from the infrastructure and return a `BaseWorkerResult` object

To see other examples of worker implementations, see the [`ProcessWorker`](https://docs.prefect.io/api-ref/prefect/workers/process/) and [`KubernetesWorker`](https://prefecthq.github.io/prefect-kubernetes/worker/) implementations.

### Integrating with the Prefect CLI

Workers can be started via the Prefect CLI by providing the `--type` option to the `prefect worker start` CLI command. To make your worker type available via the CLI, it must be available at import time.
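For example, once the worker type above is importable, it could be started like this (the work pool name is illustrative):

```bash
prefect worker start --type my-worker --pool my-work-pool
```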
If your worker is in a package, you can add an entry point to your setup file in the following format:

```python
entry_points={
    "prefect.collections": [
        "my_package_name = my_worker_module",
    ]
},
```

Prefect will discover this entry point and load the specified worker module, making your worker type available via the CLI.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/daemonize-processes.mdx b/docs/2.19.x/how-to-guides/work-pools/daemonize-processes.mdx
new file mode 100644
index 000000000000..3bf453a676fb
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/daemonize-processes.mdx
@@ -0,0 +1,197 @@
---
title: Daemonize Processes for Prefect Deployments
sidebarTitle: Daemonize Processes
---

When running workflow applications, it can be helpful to create long-running processes that run at startup and are robust to failure. In this guide you'll learn how to set up a systemd service to create long-running Prefect processes that poll for scheduled flow runs.

A systemd service is ideal for running a long-lived process on a Linux VM or physical Linux server. We will leverage systemd and see how to automatically start a [Prefect worker](https://docs.prefect.io/concepts/work-pools/#worker-overview) or long-lived [`serve` process](https://docs.prefect.io/concepts/flows/#serving-a-flow) when Linux starts. This approach provides resilience by automatically restarting the process if it crashes.

In this guide we will:

* Create a Linux user
* Install and configure Prefect
* Set up a systemd service for the Prefect worker or `.serve` process

Prerequisites
-------------------------------------------------

* An environment with a Linux operating system with [systemd](https://systemd.io/) and Python 3.8 or later.
* A superuser account (you can run `sudo` commands).
* A Prefect Cloud account, or a local instance of a Prefect server running on your network.
* If daemonizing a worker, you'll need a Prefect [deployment](https://docs.prefect.io/concepts/deployments/) with a [work pool](https://docs.prefect.io/concepts/work-pools/) your worker can connect to.

If using an [AWS t2-micro EC2 instance](https://aws.amazon.com/ec2/instance-types/t2/) with an AWS Linux image, you can install Python and pip with `sudo yum install -y python3 python3-pip`.

Step 1: Add a user
----------------------------------------------------------

Create a user account on your Linux system for the Prefect process. While you can run a worker or serve process as root, it's good security practice to avoid doing so unless you are sure you need to.

In a terminal, run:

```
sudo useradd -m prefect
sudo passwd prefect
```

When prompted, enter a password for the `prefect` account.

Next, log in to the `prefect` account by running `sudo su prefect`.

Step 2: Install Prefect
--------------------------------------------------------------------

Run `pip3 install prefect`.

This guide assumes you are installing Prefect globally, not in a virtual environment. If running a systemd service in a virtual environment, you'll just need to change the path to the `prefect` executable in the service file's `ExecStart` line. For example, if using [venv](https://docs.python.org/3/library/venv.html), change `ExecStart` to target the `prefect` application in the `bin` subdirectory of your virtual environment.

Next, set up your environment so that the Prefect client will know which server to connect to.
If connecting to Prefect Cloud, follow [the instructions](https://docs.prefect.io/ui/cloud-getting-started/#create-an-api-key) to obtain an API key and then run the following:

```
prefect cloud login -k YOUR_API_KEY
```

When prompted, choose the Prefect workspace you'd like to log in to.

If connecting to a self-hosted Prefect server instance instead of Prefect Cloud, run the following command, substituting the IP address of your server:

```
prefect config set PREFECT_API_URL=http://your-prefect-server-IP:4200
```

Finally, run the `exit` command to sign out of the `prefect` Linux account. This command switches you back to your sudo-enabled account so you can run the commands in the next section.

Step 3: Set up a systemd service
--------------------------------------------------------------------------------------

See the section below if you are setting up a Prefect worker. Skip to the [next section](#setting-up-a-systemd-service-for-serve) if you are setting up a Prefect `.serve` process.

### Setting up a systemd service for a Prefect worker

Move into the `/etc/systemd/system` folder and open a file for editing. We use the Vim text editor below.

```
cd /etc/systemd/system
sudo vim my-prefect-service.service
```

my-prefect-service.service

```
[Unit]
Description=Prefect worker

[Service]
User=prefect
WorkingDirectory=/home
ExecStart=prefect worker start --pool YOUR_WORK_POOL_NAME
Restart=always

[Install]
WantedBy=multi-user.target
```

Make sure you substitute your own work pool name.

### Setting up a systemd service for `.serve`

Copy your flow entrypoint Python file and any other files needed for your flow to run into the `/home` directory (or the directory of your choice).

Here's a basic example flow:


```python my_file.py
from prefect import flow


@flow(log_prints=True)
def say_hi():
    print("Hello!")

if __name__ == "__main__":
    say_hi.serve(name="Greeting from daemonized .serve")
```

If you want to make changes to your flow code without restarting your process, you can push your code to git-based cloud storage (GitHub, Bitbucket, GitLab) and use `flow.from_source().serve()`, as in the example below.


```python my_remote_flow_code_file.py
from prefect import flow

if __name__ == "__main__":
    flow.from_source(
        source="https://github.com/org/repo.git",
        entrypoint="path/to/my_remote_flow_code_file.py:say_hi",
    ).serve(name="deployment-with-github-storage")
```

Make sure you substitute your own flow code entrypoint path.

Note that if you change the flow entrypoint parameters, you will need to restart the process.

Move into the `/etc/systemd/system` folder and open a file for editing. We use the Vim text editor below.

```
cd /etc/systemd/system
sudo vim my-prefect-service.service
```

my-prefect-service.service

```
[Unit]
Description=Prefect serve

[Service]
User=prefect
WorkingDirectory=/home
ExecStart=python3 my_file.py
Restart=always

[Install]
WantedBy=multi-user.target
```

Save, enable, and start the service
-------------------------------------------------------------------------------------------

To save the file and exit Vim, hit the escape key, type `:wq!`, then press the return key.

Next, run `sudo systemctl daemon-reload` to make systemd aware of your new service.

Then, run `sudo systemctl enable my-prefect-service` to enable the service. This command will ensure it runs when your system boots.
Next, run `sudo systemctl start my-prefect-service` to start the service.

Run your deployment from the UI and check out the logs on the **Flow Runs** page.

You can check whether your daemonized Prefect worker or serve process is running, and view its Prefect logs, with `systemctl status my-prefect-service`.

That's it! You now have a systemd service that starts when your system boots and restarts if it ever crashes.

Next steps
-------------------------------------------

If you want to set up a long-lived process on a Windows machine, the pattern is similar. Instead of systemd, you can use [NSSM](https://nssm.cc/).

Check out other [Prefect guides](https://docs.prefect.io/guides/) to see what else you can do with Prefect!
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/deploying-flows.mdx b/docs/2.19.x/how-to-guides/work-pools/deploying-flows.mdx
new file mode 100644
index 000000000000..12bd88b96dab
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/deploying-flows.mdx
@@ -0,0 +1,1153 @@
---
title: Deploying Flows to Work Pools and Workers
description: In this guide, we will configure a deployment that uses a work pool for dynamically provisioned infrastructure.
---


All Prefect flow runs are tracked by the API. The API does not require prior registration of flows. With Prefect, you can call a flow locally or in a remote environment and it will be tracked.

A deployment turns your workflow into an application that can be interacted with and managed via the Prefect API. A deployment enables you to:

* Schedule flow runs.
* Specify event triggers for flow runs.
* Assign one or more tags to organize your deployments and flow runs. You can use those tags as filters in the Prefect UI.
* Assign custom parameter values for flow runs based on the deployment.
* Create ad-hoc flow runs from the API or Prefect UI.
* Upload flow files to a defined storage location for retrieval at run time.


**Deployments created with `.serve`**

A deployment created with the Python `flow.serve` method or the `serve` function runs flows in a subprocess on the same machine where the deployment is created. It does not use a work pool or worker.

Work pool-based deployments
-----------------------------------------------------------------------------

A work pool-based deployment is useful when you want to dynamically scale the infrastructure where your flow code runs. Work pool-based deployments contain information about the infrastructure type and configuration for your workflow execution.

Work pool-based deployment infrastructure options include the following:

* Process - runs flows in a subprocess. In most cases, you're better off using `.serve`.
* [Docker](https://docs.prefect.io/guides/deployment/docker/) - runs flows in an ephemeral Docker container.
* [Kubernetes](https://docs.prefect.io/guides/deployment/kubernetes/) - runs flows as a Kubernetes Job.
* [Serverless Cloud Provider options](https://docs.prefect.io/guides/deployment/serverless-workers/) - runs flows in a Docker container in a serverless cloud provider environment, such as AWS ECS, Azure Container Instance, Google Cloud Run, or Vertex AI.

The following diagram provides a high-level overview of the conceptual elements involved in defining a work pool-based deployment that is polled by a worker and executes a flow run based on that deployment.
![](/images/deploying-flows1.png)

The work pool types above require a worker to be running on your infrastructure to poll a work pool for scheduled flow runs.


**Additional work pool options available with Prefect Cloud**

Prefect Cloud offers other flavors of work pools that don't require a worker:

* [Push Work Pools](https://docs.prefect.io/guides/deployment/push-work-pools) - serverless cloud options that don't require a worker because Prefect Cloud submits flow runs to your serverless cloud infrastructure on your behalf. Prefect can auto-provision your cloud infrastructure for you and set it up to use your work pool.

* [Managed Execution](https://docs.prefect.io/guides/managed-execution/) - Prefect Cloud submits and runs your deployment on serverless infrastructure. No cloud provider account required.


In this guide, we focus on deployments that require a worker.

Work pool-based deployments that use a worker also let you assign a work queue name to prioritize work and limit concurrent runs at the work pool level.

When creating a deployment that uses a work pool and worker, we must answer _two_ basic questions:

* What instructions does a [worker](https://docs.prefect.io/concepts/work-pools/) need to set up an execution environment for our flow? For example, a flow may have Python package requirements, unique Kubernetes settings, or Docker networking configuration.
* How should the flow code be accessed?

The [tutorial](https://docs.prefect.io/tutorial/deployments/) shows how you can create a deployment with a long-running process using `.serve` and how to move to a [work pool-based deployment](https://docs.prefect.io/tutorial/workers/) setup with `.deploy`. See the discussion of when you might want to move to work pool-based deployments [there](https://docs.prefect.io/tutorial/workers/#why-workers-and-work-pools).

Next, we'll explore how to use `.deploy` to create deployments with Python code. If you'd prefer to learn about using a YAML-based alternative for managing deployment configuration, skip to the [later section on `prefect.yaml`](#creating-work-pool-based-deployments-with-prefectyaml).

Creating work pool-based deployments with `.deploy`
--------------------------------------------------------------------------------------------------------------------------

### Automatically bake your code into a Docker image

You can create a deployment from Python code by calling the `.deploy` method on a flow.

```python buy.py
from prefect import flow


@flow(log_prints=True)
def buy():
    print("Buying securities")


if __name__ == "__main__":
    buy.deploy(
        name="my-code-baked-into-an-image-deployment",
        work_pool_name="my-docker-pool",
        image="my_registry/my_image:my_image_tag"
    )
```

Make sure you have the [work pool](https://docs.prefect.io/concepts/work-pools/) created in the Prefect Cloud workspace you are authenticated to or on your running self-hosted server instance.
Then run the script, for example with `python buy.py`, to create a deployment (in future examples this step will be omitted for brevity).

You should see messages in your terminal that Docker is building your image. When the deployment build succeeds, you will see helpful information in your terminal showing you how to start a worker for your deployment and how to run your deployment. Your deployment will be visible on the `Deployments` page in the UI.
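For instance, with the example above you might start a worker for the pool and then trigger a run of the new deployment (the names match the example; adjust them to your own):

```bash
prefect worker start --pool my-docker-pool
prefect deployment run 'buy/my-code-baked-into-an-image-deployment'
```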
By default, `.deploy` will build a Docker image with your flow code baked into it and push the image to the [Docker Hub](https://hub.docker.com/) registry specified in the `image` argument.


**Authentication to Docker Hub**

You need your environment to be authenticated to your Docker registry to push an image to it.


You can specify a registry other than Docker Hub by providing the full registry path in the `image` argument.


**Warning**

If building a Docker image, the environment in which you are creating the deployment needs to have Docker installed and running.



To avoid pushing to a registry, set `push=False` in the `.deploy` method.

```python
if __name__ == "__main__":
    buy.deploy(
        name="my-code-baked-into-an-image-deployment",
        work_pool_name="my-docker-pool",
        image="my_registry/my_image:my_image_tag",
        push=False
    )
```

To avoid building an image, set `build=False` in the `.deploy` method.

```python
if __name__ == "__main__":
    buy.deploy(
        name="my-code-baked-into-an-image-deployment",
        work_pool_name="my-docker-pool",
        image="my_registry/no-build-image:1.0",
        build=False
    )
```

The specified image will need to be available in your deployment's execution environment for your flow code to be accessible.

Prefect generates a Dockerfile for you that will build an image based on one of Prefect's published images. The generated Dockerfile will copy the current directory into the Docker image and install any dependencies listed in a `requirements.txt` file.

### Automatically build a custom Docker image with a local Dockerfile

If you want to use a custom Dockerfile, you can specify the path to the Dockerfile with the `DeploymentImage` class:


```python custom_dockerfile.py
from prefect import flow
from prefect.deployments import DeploymentImage


@flow(log_prints=True)
def buy():
    print("Buying securities")


if __name__ == "__main__":
    buy.deploy(
        name="my-custom-dockerfile-deployment",
        work_pool_name="my-docker-pool",
        image=DeploymentImage(
            name="my_image",
            tag="deploy-guide",
            dockerfile="Dockerfile"
        ),
        push=False
    )
```

The `DeploymentImage` object allows for a great deal of image customization.

For example, you can install a private Python package from GCP's Artifact Registry like this:

Create a custom base Dockerfile.

```
FROM python:3.10

ARG AUTHED_ARTIFACT_REG_URL
COPY ./requirements.txt /requirements.txt

RUN pip install --extra-index-url ${AUTHED_ARTIFACT_REG_URL} -r /requirements.txt
```

Create the deployment by leveraging the `DeploymentImage` class.


```python private-package.py
from prefect import flow
from prefect.deployments.runner import DeploymentImage
from prefect.blocks.system import Secret
from my_private_package import do_something_cool


@flow(log_prints=True)
def my_flow():
    do_something_cool()


if __name__ == "__main__":
    artifact_reg_url: Secret = Secret.load("artifact-reg-url")

    my_flow.deploy(
        name="my-deployment",
        work_pool_name="k8s-demo",
        image=DeploymentImage(
            name="my-image",
            tag="test",
            dockerfile="Dockerfile",
            buildargs={"AUTHED_ARTIFACT_REG_URL": artifact_reg_url.get()},
        ),
    )
```

Note that we used a [Prefect Secret block](https://docs.prefect.io/concepts/blocks/) to load the URL configuration for the artifact registry above.
See all the optional keyword arguments for the `DeploymentImage` class [here](https://docker-py.readthedocs.io/en/stable/images.html#docker.models.images.ImageCollection.build).


**Default Docker namespace**

You can set the `PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE` setting to append a default Docker namespace to all images you build with `.deploy`. This is great if you use a private registry to store your images.

To set a default Docker namespace for your current profile run:

```bash
prefect config set PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE=<docker-registry-url>/<organization-or-username>
```

Once set, you can omit the namespace from your image name when creating a deployment:

```python with_default_docker_namespace.py
if __name__ == "__main__":
    buy.deploy(
        name="my-code-baked-into-an-image-deployment",
        work_pool_name="my-docker-pool",
        image="my_image:my_image_tag"
    )
```

The above code will build an image with the format `<docker-registry-url>/<organization-or-username>/my_image:my_image_tag` when `PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE` is set.



While baking code into Docker images is a popular deployment option, many teams decide to store their workflow code in git-based storage, such as GitHub, Bitbucket, or GitLab. Let's see how to do that next.

### Store your code in git-based cloud storage

If you don't specify an `image` argument for `.deploy`, then you need to specify where to pull the flow code from at runtime with the `from_source` method.

Here's how we can pull our flow code from a GitHub repository.


```python git_storage.py
from prefect import flow

if __name__ == "__main__":
    flow.from_source(
        "https://github.com/my_github_account/my_repo/my_file.git",
        entrypoint="flows/no-image.py:hello_world",
    ).deploy(
        name="no-image-deployment",
        work_pool_name="my_pool",
        build=False
    )
```

The `entrypoint` is the path to the file the flow is located in and the function name, separated by a colon.

Alternatively, you could specify a git-based cloud storage URL for a Bitbucket or GitLab repository.

**Note**

If you don't specify an image as part of your deployment creation, the image specified in the work pool will be used to run your flow.

After creating a deployment you might change your flow code. Generally, you can just push your code to GitHub without rebuilding your deployment. The exception is if something that the server needs to know about changes, such as the flow entrypoint parameters. Rerunning the Python script with `.deploy` will update your deployment on the server with the new flow code.

If you need to provide additional configuration, such as specifying a private repository, you can provide a [`GitRepository`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.runner.storage.GitRepository) object instead of a URL:


```python private_git_storage.py
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret

if __name__ == "__main__":
    flow.from_source(
        source=GitRepository(
            url="https://github.com/org/private-repo.git",
            branch="dev",
            credentials={
                "access_token": Secret.load("github-access-token")
            }
        ),
        entrypoint="flows/no-image.py:hello_world",
    ).deploy(
        name="private-git-storage-deployment",
        work_pool_name="my_pool",
        build=False
    )
```

Note the use of the Secret block to load the GitHub access token. Alternatively, you could provide a username and password to the `username` and `password` fields of the `credentials` argument.
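For example, here is a sketch of that username/password variant; the Secret block names below are hypothetical:

```python
from prefect import flow
from prefect.runner.storage import GitRepository
from prefect.blocks.system import Secret

if __name__ == "__main__":
    flow.from_source(
        source=GitRepository(
            url="https://github.com/org/private-repo.git",
            credentials={
                # Hypothetical Secret blocks holding the Git username and password
                "username": Secret.load("git-username").get(),
                "password": Secret.load("git-password").get(),
            },
        ),
        entrypoint="flows/no-image.py:hello_world",
    ).deploy(
        name="private-git-user-pass-deployment",
        work_pool_name="my_pool",
        build=False
    )
```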
### Store your code in cloud provider storage

Another option for flow code storage is any [fsspec](https://filesystem-spec.readthedocs.io/en/latest/)-supported storage location, such as AWS S3, GCP GCS, or Azure Blob Storage.

For example, you can pass the S3 bucket path to `source`.


```python s3_storage.py
from prefect import flow

if __name__ == "__main__":
    flow.from_source(
        source="s3://my-bucket/my-folder",
        entrypoint="flows.py:my_flow",
    ).deploy(
        name="deployment-from-aws-flow",
        work_pool_name="my_pool",
    )
```

In the example above, your credentials will be auto-discovered from your deployment creation environment; credentials will also need to be available in your runtime environment.

If you need additional configuration for your cloud-based storage - for example, with a private S3 bucket - we recommend using a [storage block](https://docs.prefect.io/concepts/blocks/). A storage block also ensures your credentials will be available in both your deployment creation environment and your execution environment.

Here's an example that uses an `S3Bucket` block from the [prefect-aws library](https://prefecthq.github.io/prefect-aws/).


```python s3_storage_auth.py
from prefect import flow
from prefect_aws.s3 import S3Bucket

if __name__ == "__main__":
    flow.from_source(
        source=S3Bucket.load("my-code-storage"), entrypoint="my_file.py:my_flow"
    ).deploy(name="test-s3", work_pool_name="my_pool")
```

If you are familiar with the deployment creation mechanics with `.serve`, you will notice that `.deploy` is very similar. `.deploy` just requires a work pool name and has a number of parameters dealing with flow code storage for Docker images.

Unlike with `.serve`, if you don't specify an image to use for your flow, you must specify where to pull the flow code from at runtime with the `from_source` method; `from_source` is optional with `.serve`.

### Additional configuration with `.deploy`

Our examples thus far have explored options for where to store flow code. Let's turn our attention to other deployment configuration options.

To pass parameters to your flow, you can use the `parameters` argument in the `.deploy` method. Just pass in a dictionary of key-value pairs.


```python pass_params.py
from prefect import flow

@flow
def hello_world(name: str):
    print(f"Hello, {name}!")

if __name__ == "__main__":
    hello_world.deploy(
        name="pass-params-deployment",
        work_pool_name="my_pool",
        parameters=dict(name="Prefect"),
        image="my_registry/my_image:my_image_tag",
    )
```

The `job_variables` parameter allows you to fine-tune the infrastructure settings for a deployment. The values passed in override default values in the specified work pool's [base job template](https://docs.prefect.io/concepts/work-pools/#base-job-template).

You can override job variables, such as `image_pull_policy` and `image`, for a specific deployment with the `job_variables` argument.
```python job_var_image_pull.py
if __name__ == "__main__":
    get_repo_info.deploy(
        name="my-deployment-never-pull",
        work_pool_name="my-docker-pool",
        job_variables={"image_pull_policy": "Never"},
        image="my-image:my-tag",
        push=False
    )
```

Similarly, you can override the environment variables specified in a work pool through the `job_variables` parameter:


```python job_var_env_vars.py
if __name__ == "__main__":
    get_repo_info.deploy(
        name="my-deployment-never-pull",
        work_pool_name="my-docker-pool",
        job_variables={"env": {"EXTRA_PIP_PACKAGES": "boto3"}},
        image="my-image:my-tag",
        push=False
    )
```

The dictionary key `EXTRA_PIP_PACKAGES` denotes a special environment variable that Prefect will use to install additional Python packages at runtime. This approach is an alternative to building an image with a custom `requirements.txt` copied into it.

For more information on overriding job variables, see this [guide](https://docs.prefect.io/guides/deployment/overriding-job-variables/).

### Working with multiple deployments with `deploy`

You can create multiple deployments from one or more Python files that use `.deploy`. These deployments can be managed independently of one another, allowing you to deploy the same flow with different configurations in the same codebase.

To create multiple work pool-based deployments at once, you can use the `deploy` function, which is analogous to the `serve` function.

```python
from prefect import deploy, flow

@flow(log_prints=True)
def buy():
    print("Buying securities")


if __name__ == "__main__":
    deploy(
        buy.to_deployment(name="dev-deploy", work_pool_name="my-dev-work-pool"),
        buy.to_deployment(name="prod-deploy", work_pool_name="my-prod-work-pool"),
        image="my-registry/my-image:dev",
        push=False,
    )
```

Note that in the example above we created two deployments from the same flow, but with different work pools. Alternatively, we could have created two deployments from different flows.

```python
from prefect import deploy, flow

@flow(log_prints=True)
def buy():
    print("Buying securities.")

@flow(log_prints=True)
def sell():
    print("Selling securities.")


if __name__ == "__main__":
    deploy(
        buy.to_deployment(name="buy-deploy"),
        sell.to_deployment(name="sell-deploy"),
        work_pool_name="my-dev-work-pool",
        image="my-registry/my-image:dev",
        push=False,
    )
```

In the example above, the code for both flows gets baked into the same image.

We can specify that one or more flows should be pulled from a remote location at runtime by using the `from_source` method. Here's an example of deploying two flows, one defined locally and one defined in a remote repository:

```python
from prefect import deploy, flow


@flow(log_prints=True)
def local_flow():
    print("I'm a flow!")

if __name__ == "__main__":
    deploy(
        local_flow.to_deployment(name="example-deploy-local-flow"),
        flow.from_source(
            source="https://github.com/org/repo.git",
            entrypoint="flows.py:my_flow",
        ).to_deployment(
            name="example-deploy-remote-flow",
        ),
        work_pool_name="my-work-pool",
        image="my-registry/my-image:dev",
    )
```

You can pass any number of flows to the `deploy` function. This behavior is useful if you use a monorepo approach to your workflows.
Creating work pool-based deployments with prefect.yaml
----------------------------------------------------------------------------------------------------------------------------------

The `prefect.yaml` file is a YAML file describing base settings for your deployments, procedural steps for preparing deployments, and instructions for preparing the execution environment for a deployment run.

You can initialize your deployment configuration, which creates the `prefect.yaml` file, by running the CLI command `prefect init` in any directory or repository that stores your flow code.


**Deployment configuration recipes**

Prefect ships with many off-the-shelf "recipes" that allow you to get started with more structure within your `prefect.yaml` file; run `prefect init` to be prompted with the available recipes in your installation. You can provide a recipe name in your initialization command with the `--recipe` flag; otherwise, Prefect will attempt to guess an appropriate recipe based on the structure of your working directory (for example, if you initialize within a `git` repository, Prefect will use the `git` recipe).


The `prefect.yaml` file contains deployment configuration for deployments created from this file, default instructions for how to build and push any necessary code artifacts (such as Docker images), and default instructions for pulling a deployment in remote execution environments (e.g., cloning a GitHub repository).

Any deployment configuration can be overridden via options available on the `prefect deploy` CLI command when creating a deployment.


**`prefect.yaml` file flexibility**

In older versions of Prefect, this file had to be in the root of your repository or project directory and named `prefect.yaml`. Now this file can be located in a directory outside the project or a subdirectory inside the project. It can be named differently, provided the filename ends in `.yaml`. You can even have multiple `prefect.yaml` files with the same name in different directories. By default, `prefect deploy` will use a `prefect.yaml` file in the project's root directory. To use a custom deployment configuration file, supply the new `--prefect-file` CLI argument when running the `deploy` command from the root of your project directory:

`prefect deploy --prefect-file path/to/my_file.yaml`


The base structure for `prefect.yaml` is as follows:

```
# generic metadata
prefect-version: null
name: null

# preparation steps
build: null
push: null

# runtime steps
pull: null

# deployment configurations
deployments:
- # base metadata
  name: null
  version: null
  tags: []
  description: null
  schedule: null

  # flow-specific fields
  entrypoint: null
  parameters: {}

  # infra-specific fields
  work_pool:
    name: null
    work_queue_name: null
    job_variables: {}
```

The metadata fields are always pre-populated for you. These fields are for bookkeeping purposes only. The other sections are pre-populated based on recipe; if no recipe is provided, Prefect will attempt to guess an appropriate one based on local configuration.

You can create deployments via the CLI command `prefect deploy` without ever needing to alter the `deployments` section of your `prefect.yaml` file — the `prefect deploy` command will help in deployment creation via interactive prompts. The `prefect.yaml` file facilitates version-controlling your deployment configuration and managing multiple deployments.
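As a hedged illustration of that workflow (the recipe, entrypoint, and names below are placeholders, not required values):

```bash
# Scaffold a prefect.yaml using the Docker recipe
prefect init --recipe docker

# Create or update a deployment from the generated prefect.yaml,
# answering any remaining prompts interactively
prefect deploy ./flows/hello.py:my_flow --name my-deployment --pool my-work-pool
```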
### Deployment actions

Deployment actions defined in your `prefect.yaml` file control the lifecycle of the creation and execution of your deployments. The three actions available are `build`, `push`, and `pull`. `pull` is the only required deployment action — it is used to define how Prefect will pull your deployment in remote execution environments.

Each action is defined as a list of steps that execute in sequence.

Each step has the following format:

```
section:
- prefect_package.path.to.importable.step:
    id: "step-id" # optional
    requires: "pip-installable-package-spec" # optional
    kwarg1: value
    kwarg2: more-values
```

Every step can optionally provide a `requires` field that Prefect will use to auto-install the step's package in the event that the step cannot be found in the current environment. Each step can also specify an `id`, which is used when referencing step outputs in later steps. The additional fields map directly onto Python keyword arguments to the step function. Within a given section, steps always run in the order that they are provided within the `prefect.yaml` file.

**Deployment Instruction Overrides**

`build`, `push`, and `pull` sections can all be overridden on a per-deployment basis by defining `build`, `push`, and `pull` fields within a deployment definition in the `prefect.yaml` file.

The `prefect deploy` command will use any `build`, `push`, or `pull` instructions provided in a deployment's definition in the `prefect.yaml` file.

This capability is useful with multiple deployments that require different deployment instructions.

### The build action

The build section of `prefect.yaml` is where any necessary side effects for running your deployments are built - the most common type of side effect produced here is a Docker image. If you initialize with the docker recipe, you will be prompted to provide required information, such as image name and tag:

```
prefect init --recipe docker
>> image_name: < insert image name here >
>> tag: < insert image tag here >
```

**Use `--field` to avoid the interactive experience**

We recommend that you only initialize a recipe when you are first creating your deployment structure, and afterwards store your configuration files within version control. However, sometimes you may need to initialize programmatically and avoid the interactive prompts.
To do so, provide all required fields for your recipe using the `--field` flag:

```bash
prefect init --recipe docker \
    --field image_name=my-repo/my-image \
    --field tag=my-tag
```

```
build:
- prefect_docker.deployments.steps.build_docker_image:
    requires: prefect-docker>=0.3.0
    image_name: my-repo/my-image
    tag: my-tag
    dockerfile: auto
    push: true
```

Once you've confirmed that these fields are set to their desired values, this step will automatically build a Docker image with the provided name and tag and push it to the repository referenced by the image name.
[As the `prefect-docker` package documentation notes](https://prefecthq.github.io/prefect-docker/deployments/steps/#prefect_docker.deployments.steps.BuildDockerImageResult), this step produces a few fields that can optionally be used in future steps or within `prefect.yaml` as template values.
It is best practice to use `{{ image }}` within `prefect.yaml` (specifically in the work pool's job variables section) so that you don't risk having your build step and deployment specification get out of sync with hardcoded values.
**Note**

Note that in the build step example above, we relied on the `prefect-docker` package; in cases that deal with external services, additional packages are often required and will be auto-installed for you.



**Pass output to downstream steps**

Each deployment action can be composed of multiple steps. For example, if you wanted to build a Docker image tagged with the current commit hash, you could use the `run_shell_script` step and feed the output into the `build_docker_image` step:

```yaml
build:
  - prefect.deployments.steps.run_shell_script:
      id: get-commit-hash
      script: git rev-parse --short HEAD
      stream_output: false
  - prefect_docker.deployments.steps.build_docker_image:
      requires: prefect-docker
      image_name: my-image
      tag: "{{ get-commit-hash.stdout }}"
      dockerfile: auto
```

Note that the `id` field is used in the `run_shell_script` step so that its output can be referenced in the next step.


### The push action

The push section is most critical for situations in which code is not stored on persistent filesystems or in version control. In this scenario, code is often pushed to and pulled from a cloud storage bucket of some kind (e.g., S3, GCS, or Azure Blob Storage). The push section allows users to specify and customize the logic for pushing this code repository to arbitrary remote locations.

For example, a user wishing to store their code in an S3 bucket and rely on default worker settings for its runtime environment could use the `s3` recipe:

```
prefect init --recipe s3
>> bucket: < insert bucket name here >
```

Inspecting our newly created `prefect.yaml` file, we find that the `push` and `pull` sections have been templated out for us as follows:

```yaml
push:
- prefect_aws.deployments.steps.push_to_s3:
    id: push-code
    requires: prefect-aws>=0.3.0
    bucket: my-bucket
    folder: project-name
    credentials: null

pull:
- prefect_aws.deployments.steps.pull_from_s3:
    requires: prefect-aws>=0.3.0
    bucket: my-bucket
    folder: "{{ push-code.folder }}"
    credentials: null
```

The bucket has been populated with our provided value (which also could have been provided with the `--field` flag). Note that the `folder` property of the `pull` step is a template - the `push_to_s3` step outputs both a `bucket` value and a `folder` value that can be used to template downstream steps. Doing this helps you keep your steps consistent across edits.

As discussed above, if you are using [blocks](https://docs.prefect.io/concepts/blocks/), the credentials section can be templated with a block reference for secure and dynamic credentials access:

```yaml
push:
- prefect_aws.deployments.steps.push_to_s3:
    requires: prefect-aws>=0.3.0
    bucket: my-bucket
    folder: project-name
    credentials: "{{ prefect.blocks.aws-credentials.dev-credentials }}"
```

Anytime you run `prefect deploy`, this `push` section will be executed upon successful completion of your `build` section. For more information on the mechanics of steps, [see below](#deployment-mechanics).

### The pull action

The pull section is the most important section within the `prefect.yaml` file. It contains instructions for preparing your flows for a deployment run. These instructions will be executed each time a deployment created within this folder is run via a worker.
There are three main types of steps that typically show up in a `pull` section:

* `set_working_directory`: this step simply sets the working directory for the process prior to importing your flow
* `git_clone`: this step clones the provided repository on the provided branch
* `pull_from_{cloud}`: this step pulls the working directory from a cloud storage location (e.g., S3)


**Use block and variable references**

All [block and variable references](#templating-options) within your pull step will remain unresolved until runtime and will be pulled each time your deployment is run. This allows you to avoid storing sensitive information insecurely; it also allows you to manage certain types of configuration from the API and UI without having to rebuild your deployment every time.


Below is an example of how to use an existing `GitHubCredentials` block to clone a private GitHub repository:

```yaml
pull:
  - prefect.deployments.steps.git_clone:
      repository: https://github.com/org/repo.git
      credentials: "{{ prefect.blocks.github-credentials.my-credentials }}"
```

Alternatively, you can specify a `BitBucketCredentials` or `GitLabCredentials` block to clone from Bitbucket or GitLab. In lieu of a credentials block, you can also provide a GitHub, GitLab, or Bitbucket token directly to the `access_token` field. You can use a Secret block to do this securely:

```yaml
pull:
  - prefect.deployments.steps.git_clone:
      repository: https://bitbucket.org/org/repo.git
      access_token: "{{ prefect.blocks.secret.bitbucket-token }}"
```

### Utility steps

Utility steps can be used within a build, push, or pull action to assist in managing the deployment lifecycle:

* `run_shell_script` allows for the execution of one or more shell commands in a subprocess, and returns the standard output and standard error of the script. This step is useful for scripts that require execution in a specific environment, or those which have specific input and output requirements.

Here is an example of retrieving the short Git commit hash of the current repository to use as a Docker image tag:

```yaml
build:
  - prefect.deployments.steps.run_shell_script:
      id: get-commit-hash
      script: git rev-parse --short HEAD
      stream_output: false
  - prefect_docker.deployments.steps.build_docker_image:
      requires: prefect-docker>=0.3.0
      image_name: my-image
      tag: "{{ get-commit-hash.stdout }}"
      dockerfile: auto
```

**Provided environment variables are not expanded by default**

To expand environment variables in your shell script, set `expand_env_vars: true` in your `run_shell_script` step. For example:

```yaml
- prefect.deployments.steps.run_shell_script:
    id: get-user
    script: echo $USER
    stream_output: true
    expand_env_vars: true
```

Without `expand_env_vars: true`, the above step would return the literal string `$USER` instead of the current user.


* `pip_install_requirements` installs dependencies from a `requirements.txt` file within a specified directory.
Below is an example of installing dependencies from a `requirements.txt` file after cloning:

```yaml
pull:
  - prefect.deployments.steps.git_clone:
      id: clone-step # needed in order to be referenced in subsequent steps
      repository: https://github.com/org/repo.git
  - prefect.deployments.steps.pip_install_requirements:
      directory: "{{ clone-step.directory }}" # `clone-step` is a user-provided `id` field
      requirements_file: requirements.txt
```

Below is an example that retrieves an access token from a third-party key vault and uses it in a private clone step:

```yaml
pull:
- prefect.deployments.steps.run_shell_script:
    id: get-access-token
    script: az keyvault secret show --name <secret-name> --vault-name <vault-name> --query "value" --output tsv
    stream_output: false
- prefect.deployments.steps.git_clone:
    repository: https://bitbucket.org/samples/deployments.git
    branch: master
    access_token: "{{ get-access-token.stdout }}"
```

You can also run custom steps by packaging them. In the example below, `retrieve_secrets` is a custom Python module that has been packaged into the default working directory of a Docker image (which is `/opt/prefect` by default). `main` is the function entry point, which returns an access token (e.g. `return {"access_token": access_token}`) like the preceding example, but utilizing the Azure Python SDK for retrieval.

```yaml
- retrieve_secrets.main:
    id: get-access-token
- prefect.deployments.steps.git_clone:
    repository: https://bitbucket.org/samples/deployments.git
    branch: master
    access_token: '{{ get-access-token.access_token }}'
```

### Templating options

Values that you place within your `prefect.yaml` file can reference dynamic values in several different ways:

* **step outputs**: every step of both `build` and `push` produces named fields such as `image_name`; you can reference these fields within `prefect.yaml` and `prefect deploy` will populate them with each call. References must be enclosed in double brackets and be of the form `"{{ field_name }}"`
* **blocks**: [Prefect blocks](https://docs.prefect.io/concepts/blocks) can also be referenced with the special syntax `{{ prefect.blocks.block_type.block_slug }}`. It is highly recommended that you use block references for any sensitive information (such as a GitHub access token or any credentials) to avoid hardcoding these values in plaintext
* **variables**: [Prefect variables](https://docs.prefect.io/concepts/variables) can also be referenced with the special syntax `{{ prefect.variables.variable_name }}`. Variables can be used to reference non-sensitive, reusable pieces of information such as a default image name or a default work pool name.
* **environment variables**: you can also reference environment variables with the special syntax `{{ $MY_ENV_VAR }}`. This is especially useful for referencing environment variables that are set at runtime.
As an example, consider the following `prefect.yaml` file:

```yaml
build:
- prefect_docker.deployments.steps.build_docker_image:
    id: build-image
    requires: prefect-docker>=0.3.0
    image_name: my-repo/my-image
    tag: my-tag
    dockerfile: auto
    push: true

deployments:
- # base metadata
  name: null
  version: "{{ build-image.tag }}"
  tags:
    - "{{ $my_deployment_tag }}"
    - "{{ prefect.variables.some_common_tag }}"
  description: null
  schedule: null

  # flow-specific fields
  entrypoint: null
  parameters: {}

  # infra-specific fields
  work_pool:
    name: "my-k8s-work-pool"
    work_queue_name: null
    job_variables:
      image: "{{ build-image.image }}"
      cluster_config: "{{ prefect.blocks.kubernetes-cluster-config.my-favorite-config }}"
```

So long as our `build` steps produce fields called `image_name` and `tag`, every time we deploy a new version of our deployment, the `{{ build-image.image }}` variable will be dynamically populated with the relevant values.


**Docker step**

The most commonly used build step is [`prefect_docker.deployments.steps.build_docker_image`](https://docs.prefect.io/guides/deployment/docker/), which produces both the `image_name` and `tag` fields.

For an example, [check out the deployments tutorial](https://docs.prefect.io/guides/deployment/docker/).




A `prefect.yaml` file can have multiple deployment configurations that control the behavior of several deployments. These deployments can be managed independently of one another, allowing you to deploy the same flow with different configurations in the same codebase.

### Working with multiple deployments with prefect.yaml

Prefect supports multiple deployment declarations within the `prefect.yaml` file. This method of declaring multiple deployments allows the configuration for all deployments to be version controlled and deployed with a single command.

New deployment declarations can be added to the `prefect.yaml` file by adding a new entry to the `deployments` list. Each deployment declaration must have a unique `name` field, which is used to select deployment declarations when using the `prefect deploy` command.


**Warning**

When using a `prefect.yaml` file that is in another directory or differently named, remember that the value for the deployment `entrypoint` must be relative to the root directory of the project.

For example, consider the following `prefect.yaml` file:

```yaml
build: ...
push: ...
pull: ...

deployments:
- name: deployment-1
  entrypoint: flows/hello.py:my_flow
  parameters:
    number: 42
    message: Don't panic!
  work_pool:
    name: my-process-work-pool
    work_queue_name: primary-queue

- name: deployment-2
  entrypoint: flows/goodbye.py:my_other_flow
  work_pool:
    name: my-process-work-pool
    work_queue_name: secondary-queue

- name: deployment-3
  entrypoint: flows/hello.py:yet_another_flow
  work_pool:
    name: my-docker-work-pool
    work_queue_name: tertiary-queue
```

This file has three deployment declarations, each referencing a different flow. Each deployment declaration has a unique `name` field and can be deployed individually by using the `--name` flag when deploying.
+
+For example, to deploy `deployment-1` you would run:
+
+```
+prefect deploy --name deployment-1
+
+```
+
+
+To deploy multiple deployments you can provide multiple `--name` flags:
+
+```
+prefect deploy --name deployment-1 --name deployment-2
+
+```
+
+
+To deploy multiple deployments with the same name, you can prefix the deployment name with its flow name:
+
+```
+prefect deploy --name my_flow/deployment-1 --name my_other_flow/deployment-1
+
+```
+
+
+To deploy all deployments, use the `--all` flag by running `prefect deploy --all`.
+
+To deploy deployments that match a pattern you can run:
+
+```
+prefect deploy -n my-flow/* -n *dev/my-deployment -n dep*prod
+
+```
+
+
+The above command will deploy all deployments from the flow `my-flow`, all flows ending in `dev` with a deployment named `my-deployment`, and all deployments starting with `dep` and ending in `prod`.
+
+**CLI Options When Deploying Multiple Deployments**
+
+
+When deploying more than one deployment with a single `prefect deploy` command, any additional attributes provided via the CLI will be ignored.
+
+To provide overrides to a deployment via the CLI, you must deploy that deployment individually.
+
+### Reusing configuration across deployments
+
+Because a `prefect.yaml` file is a standard YAML file, you can use [YAML aliases](https://yaml.org/spec/1.2.2/#71-alias-nodes) to reuse configuration across deployments.
+
+This functionality is useful when multiple deployments need to share work pool configuration, deployment actions, or other settings.
+
+You can declare a YAML alias by using the `&{alias_name}` syntax and insert that alias elsewhere in the file with the `*{alias_name}` syntax. When aliasing YAML maps, you can also override specific fields of the aliased map by using the `<<: *{alias_name}` syntax and adding additional fields below.
+
+We recommend adding a `definitions` section to your `prefect.yaml` file at the same level as the `deployments` section to store your aliases.
+
+For example, consider the following `prefect.yaml` file:
+
+```yaml
+build: ...
+push: ...
+pull: ...
+
+definitions:
+  work_pools:
+    my_docker_work_pool: &my_docker_work_pool
+      name: my-docker-work-pool
+      work_queue_name: default
+      job_variables:
+        image: "{{ build-image.image }}"
+  schedules:
+    every_ten_minutes: &every_10_minutes
+      interval: 600
+  actions:
+    docker_build: &docker_build
+    - prefect_docker.deployments.steps.build_docker_image: &docker_build_config
+        id: build-image
+        requires: prefect-docker>=0.3.0
+        image_name: my-example-image
+        tag: dev
+        dockerfile: auto
+        push: true
+
+deployments:
+- name: deployment-1
+  entrypoint: flows/hello.py:my_flow
+  schedule: *every_10_minutes
+  parameters:
+    number: 42
+    message: Don't panic!
+  work_pool: *my_docker_work_pool
+  build: *docker_build # Uses the full docker_build action with no overrides
+
+- name: deployment-2
+  entrypoint: flows/goodbye.py:my_other_flow
+  work_pool: *my_docker_work_pool
+  build:
+  - prefect_docker.deployments.steps.build_docker_image:
+      <<: *docker_build_config # Uses the docker_build_config alias and overrides the dockerfile field
+      dockerfile: Dockerfile.custom
+
+- name: deployment-3
+  entrypoint: flows/hello.py:yet_another_flow
+  schedule: *every_10_minutes
+  work_pool:
+    name: my-process-work-pool
+    work_queue_name: primary-queue
+
+```
+
+
+In the above example, we are using YAML aliases to reuse work pool, schedule, and build configuration across multiple deployments:
+
+* `deployment-1` and `deployment-2` are using the same work pool configuration
+* `deployment-1` and `deployment-3` are using the same schedule
+* `deployment-1` and `deployment-2` are using the same build deployment action, but `deployment-2` is overriding the `dockerfile` field to use a custom Dockerfile
+
+### Deployment declaration reference
+
+#### Deployment fields
+
+Below are fields that can be added to each deployment declaration.
+
+
+| Property | Description |
+|----------------------------|-------------|
+| `name` | The name to give to the created deployment. Used with the `prefect deploy` command to create or update specific deployments. |
+| `version` | An optional version for the deployment. |
+| `tags` | A list of strings to assign to the deployment as tags. |
+| `description` | An optional description for the deployment. |
+| `schedule` | An optional [schedule](/2.19.1/concepts/schedules) to assign to the deployment. Fields for this section are documented in the [Schedule Fields](/2.19.1/how-to-guides/work-pools/deploying-flows#schedule-fields) section. |
+| `triggers` | An optional array of [triggers](https://docs.prefect.io/concepts/deployments/#create-a-flow-run-with-an-event-trigger) to assign to the deployment. |
+| `entrypoint` | Required path to the `.py` file containing the flow you want to deploy (relative to the root directory of your development folder) combined with the name of the flow function. Should be in the format `path/to/file.py:flow_function_name`. |
+| `parameters` | Optional default values to provide for the parameters of the deployed flow. Should be an object with key/value pairs. |
+| `enforce_parameter_schema` | Boolean flag that determines whether the API should validate the parameters passed to a flow run against the parameter schema generated for the deployed flow. |
+| `work_pool` | Information on where to schedule flow runs for the deployment. Fields for this section are documented in the [Work Pool Fields](/2.19.1/how-to-guides/work-pools/deploying-flows/#work-pool-fields) section. |
+
+#### Schedule fields
+
+Below are fields that can be added to a deployment declaration's `schedule` section.
+
+
+| Property | Description |
+|---------------|-------------|
+| `interval` | Number of seconds indicating the time between flow runs. Cannot be used in conjunction with `cron` or `rrule`. 
|
+| `anchor_date` | Datetime string indicating the starting or "anchor" date to begin the schedule. If no `anchor_date` is supplied, the current UTC time is used. Can only be used with `interval`. |
+| `timezone` | String name of a time zone, used to enforce localization behaviors like DST boundaries. See the [IANA Time Zone Database](https://www.iana.org/time-zones) for valid time zones. |
+| `cron` | A valid cron string. Cannot be used in conjunction with `interval` or `rrule`. |
+| `day_or` | Boolean indicating how `croniter` handles `day` and `day_of_week` entries. Must be used with `cron`. Defaults to `True`. |
+| `rrule` | String representation of an RRule schedule. See the [`rrulestr` examples](https://dateutil.readthedocs.io/en/stable/rrule.html#rrulestr-examples) for syntax. Cannot be used in conjunction with `interval` or `cron`. |
+
+For more information about schedules, see the [Schedules](https://docs.prefect.io/concepts/schedules/#creating-schedules-through-a-deployment-yaml-files-schedule-section) concept doc.
+
+#### Work pool fields
+
+Below are fields that can be added to a deployment declaration's `work_pool` section.
+
+| Property | Description |
+|-------------------|-------------|
+| `name` | The name of the work pool to schedule flow runs in for the deployment. |
+| `work_queue_name` | The name of the work queue within the specified work pool to schedule flow runs in for the deployment. If not provided, the default queue for the specified work pool will be used. |
+| `job_variables` | Values used to override the default values in the specified work pool's [base job template](/2.19.1/concepts/work-pools--workers). Maps directly to a created deployment's `infra_overrides` attribute. |
+
+#### Deployment mechanics
+
+Anytime you run `prefect deploy` in a directory that contains a `prefect.yaml` file, the following actions are taken in order:
+
+* The `prefect.yaml` file is loaded. First, the `build` section is loaded and all variable and block references are resolved. The steps are then run in the order provided.
+* Next, the `push` section is loaded and all variable and block references are resolved; the steps within this section are then run in the order provided.
+* Next, the `pull` section is templated with any step outputs but _is not run_. Note that block references are _not_ hydrated for security purposes; block references are always resolved at runtime.
+* Next, all variable and block references are resolved within the deployment declaration. All flags provided via the `prefect deploy` CLI are then overlaid on the values loaded from the file.
+* The final step occurs when the fully realized deployment specification is registered with the Prefect API.
+
+
+**Deployment Instruction Overrides**
+
+The `build`, `push`, and `pull` sections in deployment definitions take precedence over the corresponding sections above them in `prefect.yaml`.
+
+
+Each time a step is run, the following actions are taken in order:
+
+* The step's inputs and block/variable references are resolved (see [the templating documentation above](#templating-options) for more details).
+* The step's function is imported; if it cannot be found, the special `requires` keyword is used to install the necessary packages.
+* The step's function is called with the resolved inputs.
+* The step's output is returned and used to resolve inputs for subsequent steps.
+
+Next steps
+-------------------------------------------
+
+Now that you are familiar with creating deployments, you may want to explore infrastructure options for running your deployments:
+
+* [Managed work pools](https://docs.prefect.io/guides/managed-execution/)
+* [Push work pools](https://docs.prefect.io/guides/deployment/push-work-pools/)
+* [Kubernetes work pools](https://docs.prefect.io/guides/deployment/kubernetes/)
+* [Serverless hybrid work pools](https://docs.prefect.io/guides/deployment/serverless-workers/)
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/flow-code-storage.mdx b/docs/2.19.x/how-to-guides/work-pools/flow-code-storage.mdx
new file mode 100644
index 000000000000..f831a359c2e2
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/flow-code-storage.mdx
@@ -0,0 +1,316 @@
+---
+title: Flow Code Storage
+---
+
+Where to Store Your Flow Code
+---------------------------------------------------------------------------------
+
+When a deployment is run, the execution environment needs access to the flow code. Flow code is not stored in a Prefect server instance or in Prefect Cloud.
+
+You have several flow code storage options.
+
+This guide focuses on deployments created with the interactive CLI experience or a `prefect.yaml` file. If you'd like to create your deployments using Python code, see the discussion of flow code storage in the [`.deploy` section of the Deploying Flows to Work Pools and Workers guide](https://docs.prefect.io/guides/prefect-deploy/#creating-work-pool-based-deployments-with-deploy).
+
+Option 1: Git-based storage
+----------------------------------------------------------------------------
+
+Git-based version control platforms are popular locations for code storage. They provide redundancy, version control, and easier collaboration.
+
+[GitHub](https://github.com/) is the most popular cloud-based repository hosting provider. [GitLab](https://www.gitlab.com/) and [Bitbucket](https://bitbucket.org/) are other popular options. Prefect supports each of these platforms.
+
+### Creating a deployment with git-based storage
+
+Run `prefect deploy` from the root directory of the git repository and create a new deployment. You will see a series of prompts. Select that you want to create a new deployment, select the flow code entrypoint, and name your deployment.
+
+Prefect detects that you are in a git repository and asks if you want to store your flow code there. Select "y" and you will be prompted to confirm the URL of your git repository and the branch name, as in the example below:
+
+```
+? Your Prefect workers will need access to this flow's code in order to run it.
+Would you like your workers to pull your flow code from its remote repository when running this flow? [y/n] (y):
+? Is https://github.com/my_username/my_repo.git the correct URL to pull your flow code from? [y/n] (y):
+? Is main the correct branch to pull your flow code from? [y/n] (y):
+? Is this a private repository? [y/n]: y
+
+```
+
+
+In this example, the git repository is hosted on GitHub. If you are using Bitbucket or GitLab, the URL will match your provider. If the repository is public, enter "n" and you are on your way.
+
+If the repository is private, you can enter a token to access your private repository. This token will be saved in an encrypted Prefect Secret block.
+
+```
+? Please enter a token that can be used to access your private repository.
This token will be saved as a Secret block via the Prefect API: "123_abc_this_is_my_token"
+
+```
+
+
+Verify that you have a new Secret block in your active workspace named in the format "deployment-my-deployment-my-flow-name-repo-token".
+
+Creating access tokens differs for each provider.
+
+
+
+We recommend using HTTPS with [fine-grained Personal Access Tokens](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token) so that you can limit access by repository. See the GitHub docs for [Personal Access Tokens (PATs)](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token).
+
+Under _Your Profile->Developer Settings->Personal access tokens->Fine-grained token_ choose _Generate New Token_ and fill in the required fields. Under _Repository access_ choose _Only select repositories_ and grant the token permissions for _Contents_.
+
+
+
+We recommend using HTTPS with Repository, Project, or Workspace [Access Tokens](https://support.atlassian.com/bitbucket-cloud/docs/access-tokens/).
+
+You can create a Repository Access Token with _Scopes->Repositories->Read_.
+
+Bitbucket requires you to prepend the token string with `x-token-auth:`. So the full string looks like `x-token-auth:abc_123_this_is_my_token`.
+
+
+
+We recommend using HTTPS with [Project Access Tokens](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html).
+
+In your repository in the GitLab UI, select _Settings->Repository->Project Access Tokens_ and check _read\_repository_ under _Select scopes_.
+
+
+
+
+If you want to configure a Secret block ahead of time, create the block via code or the Prefect UI and reference it in your `prefect.yaml` file.
+
+```
+pull:
+    - prefect.deployments.steps.git_clone:
+        repository: https://gitlab.com/org/my-private-repo.git
+        access_token: "{{ prefect.blocks.secret.my-block-name }}"
+
+```
+
+
+Alternatively, you can create a Credentials block ahead of time and reference it in the `prefect.yaml` pull step.
+
+
+
+1. Install the `prefect-github` library with `pip install -U prefect-github`.
+2. Register the blocks in that library to make them available on the server with `prefect block register -m prefect_github`.
+3. Create a GitHub Credentials block via code or the Prefect UI and reference it as shown:
+
+```
+pull:
+    - prefect.deployments.steps.git_clone:
+        repository: https://github.com/org/my-private-repo.git
+        credentials: "{{ prefect.blocks.github-credentials.my-block-name }}"
+
+```
+
+
+1. Install the relevant library with `pip install -U prefect-bitbucket`.
+2. Register the blocks in that library with `prefect block register -m prefect_bitbucket`.
+3. Create a Bitbucket credentials block via code or the Prefect UI and reference it as shown:
+
+```
+pull:
+    - prefect.deployments.steps.git_clone:
+        repository: https://bitbucket.org/org/my-private-repo.git
+        credentials: "{{ prefect.blocks.bitbucket-credentials.my-block-name }}"
+
+```
+
+
+1. Install the relevant library with `pip install -U prefect-gitlab`.
+2. Register the blocks in that library with `prefect block register -m prefect_gitlab`.
+3. 
Create a GitLab credentials block via code or the Prefect UI and reference it as shown:
+
+```
+pull:
+    - prefect.deployments.steps.git_clone:
+        repository: https://gitlab.com/org/my-private-repo.git
+        credentials: "{{ prefect.blocks.gitlab-credentials.my-block-name }}"
+
+```
+
+
+
+
+**Push your code**
+
+When you make a change to your code, Prefect does not push your code to your git-based version control platform. You need to push your code manually or as part of your CI/CD pipeline. This is an intentional design decision, made to avoid confusion about the git history and push process.
+
+
+Option 2: Docker-based storage
+----------------------------------------------------------------------------------
+
+Another popular way to store your flow code is to include it in a Docker image. All work pool options except Process and Prefect Managed work pools allow you to bake your code into a Docker image.
+
+1. Run `prefect init` in the root of your repository, choose the `docker` recipe, and answer the prompts to create a `prefect.yaml` file with a build step that will create a Docker image with the flow code built in. See the [Workers and Work Pools page of the tutorial](https://docs.prefect.io/tutorial/workers/) for more info.
+2. Run `prefect deploy` from the root of your repository to create a deployment.
+3. When a deployment runs, the worker pulls the Docker image and spins up a container.
+4. The flow code baked into the image runs inside the container.
+
+**CI/CD may not require push or pull steps**
+
+
+You don't need push or pull steps in the `prefect.yaml` file if using CI/CD to build a Docker image outside of Prefect. Instead, the work pool can reference the image directly.
+
+
+Option 3: Cloud-provider storage
+--------------------------------------------------------------------------------------
+
+You can store your code in an AWS S3 bucket, Azure Blob Storage container, or GCP GCS bucket and specify the destination directly in the `push` and `pull` steps of your `prefect.yaml` file.
+
+To create a templated `prefect.yaml` file, run `prefect init` and select the recipe for the applicable cloud-provider storage. Below are the recipe options and the relevant portions of the `prefect.yaml` file.
+
+
+
+Choose `s3Bucket` as the recipe and enter the bucket name when prompted.
+
+```
+# push section allows you to manage if and how this project is uploaded to remote locations
+push:
+- prefect_aws.deployments.steps.push_to_s3:
+    id: push_code
+    requires: prefect-aws>=0.3.4
+    bucket: my-bucket
+    folder: my-folder
+    credentials: "{{ prefect.blocks.aws-credentials.my-credentials-block }}" # if private
+
+# pull section allows you to provide instructions for cloning this project in remote locations
+pull:
+- prefect_aws.deployments.steps.pull_from_s3:
+    id: pull_code
+    requires: prefect-aws>=0.3.4
+    bucket: '{{ push_code.bucket }}'
+    folder: '{{ push_code.folder }}'
+    credentials: "{{ prefect.blocks.aws-credentials.my-credentials-block }}" # if private
+
+```
+
+
+If the bucket requires authentication to access it, you can do the following:
+
+1. Install the [Prefect-AWS](https://prefecthq.github.io/prefect-aws/) library with `pip install -U prefect-aws`.
+2. Register the blocks in Prefect-AWS with `prefect block register -m prefect_aws`.
+3. Create a user with a role that has read and write permissions to access the bucket. If using the UI, create an access key pair with _IAM->Users->Security credentials->Access keys->Create access key_. 
Choose _Use case->Other_ and then copy the _Access key_ and _Secret access key_ values.
+4. Create an AWS Credentials block via code or the Prefect UI. In addition to the block name, most users will fill in the _AWS Access Key ID_ and _AWS Access Key Secret_ fields.
+5. Reference the block as shown in the push and pull steps above.
+
+
+
+Choose `azure` as the recipe and enter the container name when prompted.
+
+```
+# push section allows you to manage if and how this project is uploaded to remote locations
+push:
+- prefect_azure.deployments.steps.push_to_azure_blob_storage:
+    id: push_code
+    requires: prefect-azure>=0.2.8
+    container: my-prefect-azure-container
+    folder: my-folder
+    credentials: "{{ prefect.blocks.azure-blob-storage-credentials.my-credentials-block }}" # if private
+
+# pull section allows you to provide instructions for cloning this project in remote locations
+pull:
+- prefect_azure.deployments.steps.pull_from_azure_blob_storage:
+    id: pull_code
+    requires: prefect-azure>=0.2.8
+    container: '{{ push_code.container }}'
+    folder: '{{ push_code.folder }}'
+    credentials: "{{ prefect.blocks.azure-blob-storage-credentials.my-credentials-block }}" # if private
+
+```
+
+
+If the blob requires authentication to access it, you can do the following:
+
+1. Install the [Prefect-Azure](https://prefecthq.github.io/prefect-azure/) library with `pip install -U prefect-azure`.
+2. Register the blocks in Prefect-Azure with `prefect block register -m prefect_azure`.
+3. Create an access key for a role with sufficient (read and write) permissions to access the blob. A connection string that will contain all needed information can be created in the UI under _Storage Account->Access keys_.
+4. Create an Azure Blob Storage Credentials block via code or the Prefect UI. Enter a name for the block and paste the connection string into the _Connection String_ field.
+5. Reference the block as shown in the push and pull steps above.
+
+
+
+Choose `gcs` as the recipe and enter the bucket name when prompted.
+
+```
+# push section allows you to manage if and how this project is uploaded to remote locations
+push:
+- prefect_gcp.deployments.steps.push_to_gcs:
+    id: push_code
+    requires: prefect-gcp>=0.4.3
+    bucket: my-bucket
+    folder: my-folder
+    credentials: "{{ prefect.blocks.gcp-credentials.my-credentials-block }}" # if private
+
+# pull section allows you to provide instructions for cloning this project in remote locations
+pull:
+- prefect_gcp.deployments.steps.pull_from_gcs:
+    id: pull_code
+    requires: prefect-gcp>=0.4.3
+    bucket: '{{ push_code.bucket }}'
+    folder: '{{ push_code.folder }}'
+    credentials: "{{ prefect.blocks.gcp-credentials.my-credentials-block }}" # if private
+
+```
+
+
+If the bucket requires authentication to access it, you can do the following:
+
+1. Install the [Prefect-GCP](https://prefecthq.github.io/prefect-gcp/) library with `pip install -U prefect-gcp`.
+2. Register the blocks in Prefect-GCP with `prefect block register -m prefect_gcp`.
+3. Create a service account in GCP for a role with read and write permissions to access the bucket contents. If using the GCP console, go to _IAM & Admin->Service accounts->Create service account_. After choosing a role with the required permissions, find your service account and click the three-dot menu in the _Actions_ column. Select _Manage Keys->ADD KEY->Create new key->JSON_. Download the JSON file.
+4. Create a GCP Credentials block via code or the Prefect UI. 
Enter a name for the block and paste the entire contents of the JSON key file into the _Service Account Info_ field.
+5. Reference the block as shown in the push and pull steps above.
+
+
+
+Another option for authentication is for the [worker](https://docs.prefect.io/concepts/work-pools/#worker-overview) to have access to the storage location at runtime via SSH keys.
+
+Alternatively, you can inject environment variables into your deployment, as in this example that uses an environment variable named `CUSTOM_FOLDER`:
+
+```
+push:
+- prefect_gcp.deployments.steps.push_to_gcs:
+    id: push_code
+    requires: prefect-gcp>=0.4.3
+    bucket: my-bucket
+    folder: '{{ $CUSTOM_FOLDER }}'
+
+```
+
+
+Include or exclude files from storage
+-------------------------------------------------------------------------------------------------
+
+By default, Prefect uploads all files in the current folder to the configured storage location when you create a deployment.
+
+When using a git repository, Docker image, or cloud-provider storage location, you may want to exclude certain files or directories.
+
+* If you are familiar with git, you are likely familiar with the [`.gitignore`](https://git-scm.com/docs/gitignore) file.
+* If you are familiar with Docker, you are likely familiar with the [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file.
+* For cloud-provider storage, the `.prefectignore` file serves the same purpose and follows a similar syntax to those files. So an entry of `*.pyc` will exclude all `.pyc` files from upload.
+
+Other code storage creation methods
+---------------------------------------------------------------------------------------------
+
+In earlier versions of Prefect, [storage blocks](https://docs.prefect.io/concepts/blocks/) were the recommended way to store flow code. Storage blocks are now deprecated.
+
+As shown above, repositories can be referenced directly through interactive prompts with `prefect deploy` or in a `prefect.yaml` file. When authentication is needed, Secret or Credentials blocks can be referenced, and in some cases created automatically through interactive deployment creation prompts.
+
+Conclusion
+-------------------------------------------
+
+You've seen options for storing your flow code. For easier version control, we recommend using Docker-based storage or git-based storage for your production deployments.
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/job-variable-overrides.mdx b/docs/2.19.x/how-to-guides/work-pools/job-variable-overrides.mdx
new file mode 100644
index 000000000000..42d976a7add3
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/job-variable-overrides.mdx
@@ -0,0 +1,246 @@
+---
+title: "Deeper Dive: Overriding Work Pool Job Variables"
+sidebarTitle: Job Variable Overrides
+---
+
+As described in the [Deploying Flows to Work Pools and Workers](https://docs.prefect.io/guides/prefect-deploy/) guide, there are two ways to deploy flows to work pools: using a `prefect.yaml` file or using the `.deploy()` method.
+
+**In both cases, you can override job variables on a work pool for a given deployment.**
+
+While exactly _which_ job variables are available to be overridden depends on the type of work pool you're using at a given time, this guide will explore some common patterns for overriding job variables in both deployment methods.
+
+Background
+-------------------------------------------
+
+First of all, what are _"job variables"_? 
+
+Job variables are infrastructure-related values that are configurable on a work pool, which may be relevant to how your flow run executes on your infrastructure. Job variables can be overridden on a per-deployment or per-flow-run basis, allowing you to dynamically change infrastructure from the work pool's defaults depending on your needs.
+
+* * *
+
+Let's use `env`, the only job variable that is configurable for all work pool types, as an example.
+
+When you create or edit a work pool, you can specify a set of environment variables that will be set in the runtime environment of the flow run.
+
+For example, you might want a certain deployment to have the following environment variables available:
+
+```
+{
+    "EXECUTION_ENVIRONMENT": "staging",
+    "MY_NOT_SO_SECRET_CONFIG": "plumbus"
+}
+
+```
+
+
+Rather than hardcoding these values into your work pool in the UI and making them available to all deployments associated with that work pool, you can override these values on a _per-deployment basis_.
+
+Let's look at how to do that.
+
+How to override job variables on a deployment
+-----------------------------------------------------------------------------------------------------------------
+
+Say we have the following repo structure:
+
+```
+» tree
+.
+├── README.md
+├── requirements.txt
+├── demo_project
+│   ├── demo_flow.py
+
+```
+
+
+... and we have some `demo_flow.py` file like this:
+
+```
+import os
+from prefect import flow, task
+
+@task
+def do_something_important(not_so_secret_value: str) -> None:
+    print(f"Doing something important with {not_so_secret_value}!")
+
+@flow(log_prints=True)
+def some_work():
+    environment = os.environ.get("EXECUTION_ENVIRONMENT", "local")
+
+    print(f"Coming to you live from {environment}!")
+
+    not_so_secret_value = os.environ.get("MY_NOT_SO_SECRET_CONFIG")
+
+    if not_so_secret_value is None:
+        raise ValueError("You forgot to set MY_NOT_SO_SECRET_CONFIG!")
+
+    do_something_important(not_so_secret_value)
+
+```
+
+
+### Using a `prefect.yaml` file
+
+In this case, let's also say we have the following deployment definition in a `prefect.yaml` file at the root of our repository:
+
+```
+deployments:
+- name: demo-deployment
+  entrypoint: demo_project/demo_flow.py:some_work
+  work_pool:
+    name: local
+  schedule: null
+
+```
+
+
+**Note**
+
+While not the focus of this guide, note that this deployment definition uses a default "global" `pull` step, because one is not explicitly defined on the deployment. For reference, here's what that would look like at the top of the `prefect.yaml` file:
+
+```
+pull:
+- prefect.deployments.steps.git_clone: &clone_repo
+    repository: https://github.com/some-user/prefect-monorepo
+    branch: main
+
+```
+
+
+#### Hard-coded job variables
+
+To provide the `EXECUTION_ENVIRONMENT` and `MY_NOT_SO_SECRET_CONFIG` environment variables to this deployment, we can add a `job_variables` section to our deployment definition in the `prefect.yaml` file:
+
+```
+deployments:
+- name: demo-deployment
+  entrypoint: demo_project/demo_flow.py:some_work
+  work_pool:
+    name: local
+    job_variables:
+      env:
+        EXECUTION_ENVIRONMENT: staging
+        MY_NOT_SO_SECRET_CONFIG: plumbus
+  schedule: null
+
+```
+
+
+... and then run `prefect deploy -n demo-deployment` to deploy the flow with these job variables.
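+
+Before opening the UI, you can optionally sanity-check the result from the terminal with the Prefect CLI. This is just a quick sketch; the `some-work/demo-deployment` name assumes the flow name Prefect derives by default from the `some_work` function:
+
+```
+prefect deployment inspect 'some-work/demo-deployment'
+
+```
+
+
+The overridden `env` values should appear among the deployment's infrastructure overrides in the command's output.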
+
+We should then be able to see the job variables in the `Configuration` tab of the deployment in the UI:
+
+![Job variables in the UI](/images/overrides1.png)
+
+#### Using existing environment variables
+
+If you want to use environment variables that are already set in your local environment, you can template these in the `prefect.yaml` file using the `{{ $ENV_VAR_NAME }}` syntax:
+
+```
+deployments:
+- name: demo-deployment
+  entrypoint: demo_project/demo_flow.py:some_work
+  work_pool:
+    name: local
+    job_variables:
+      env:
+        EXECUTION_ENVIRONMENT: "{{ $EXECUTION_ENVIRONMENT }}"
+        MY_NOT_SO_SECRET_CONFIG: "{{ $MY_NOT_SO_SECRET_CONFIG }}"
+  schedule: null
+
+```
+
+
+**Note**
+
+This assumes that the machine where `prefect deploy` is run has these environment variables set.
+
+```
+export EXECUTION_ENVIRONMENT=staging
+export MY_NOT_SO_SECRET_CONFIG=plumbus
+
+```
+
+
+As before, run `prefect deploy -n demo-deployment` to deploy the flow with these job variables, and you should see them in the UI under the `Configuration` tab.
+
+### Using the `.deploy()` method
+
+If you're using the `.deploy()` method to deploy your flow, the process is similar, but instead of having your `prefect.yaml` file define the job variables, you can pass them as a dictionary to the `job_variables` argument of the `.deploy()` method.
+
+We could add the following block to our `demo_project/demo_flow.py` file from the setup section:
+
+```
+if __name__ == "__main__":
+    flow.from_source(
+        source="https://github.com/zzstoatzz/prefect-monorepo.git",
+        entrypoint="src/demo_project/demo_flow.py:some_work"
+    ).deploy(
+        name="demo-deployment",
+        work_pool_name="local", # can only .deploy() to a local work pool in prefect>=2.15.1
+        job_variables={
+            "env": {
+                "EXECUTION_ENVIRONMENT": os.environ.get("EXECUTION_ENVIRONMENT", "local"),
+                "MY_NOT_SO_SECRET_CONFIG": os.environ.get("MY_NOT_SO_SECRET_CONFIG")
+            }
+        }
+    )
+
+```
+
+
+**Note**
+
+The above example works assuming a couple of things:
+
+* the machine where this script is run has these environment variables set:
+
+```
+export EXECUTION_ENVIRONMENT=staging
+export MY_NOT_SO_SECRET_CONFIG=plumbus
+
+```
+
+
+* the flow file _already exists_ in the repository at the path specified by `entrypoint`
+
+
+Running this script with something like:
+
+```
+python demo_project/demo_flow.py
+
+```
+
+
+... will deploy the flow with the specified job variables, which should then be visible in the UI under the `Configuration` tab.
+
+![Job variables in the UI](/images/overrides2.png)
+
+How to override job variables on a flow run
+-------------------------------------------------------------------------------------------------------------
+
+When running flows, you can pass in job variables that override any values set on the work pool or deployment. Any interface that runs deployments can accept job variables.
+
+### Using the custom run form in the UI
+
+Custom runs allow you to pass in a dictionary of variables into your flow run infrastructure. Using the same `env` example from above, we could do the following:
+
+![Job variables via custom run](/images/overrides3.png)
+
+### Using the CLI
+
+Similarly, runs kicked off via CLI accept job variables with the `-jv` or `--job-variable` flag.
+
+```
+prefect deployment run \
+    --id "fb8e3073-c449-474b-b993-851fe5e80e53" \
+    --job-variable MY_NEW_ENV_VAR=42 \
+    --job-variable HELLO=THERE
+
+```
+
+
+### Using job variables in automations
+
+Additionally, runs kicked off via automation actions can use job variables, including ones rendered from Jinja templates.
+
+![Job variables via automation action](/images/overrides4.png)
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/kubernetes.mdx b/docs/2.19.x/how-to-guides/work-pools/kubernetes.mdx
new file mode 100644
index 000000000000..4848bcf6024b
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/kubernetes.mdx
@@ -0,0 +1,588 @@
+---
+title: Running Flows with Kubernetes
+sidebarTitle: Kubernetes
+---
+
+This guide will walk you through running your flows on Kubernetes. Though much of the guide is general to any Kubernetes cluster, there are differences between the managed Kubernetes offerings of the major cloud providers, especially when it comes to container registries and access management. We'll focus on Amazon Elastic Kubernetes Service (EKS).
+
+Prerequisites
+-------------------------------------------------
+
+Before we begin, there are a few prerequisites:
+
+1. A Prefect Cloud account
+2. A cloud provider (AWS, GCP, or Azure) account
+3. [Install](https://docs.prefect.io/getting-started/installation/) Python and Prefect
+4. Install [Helm](https://helm.sh/docs/intro/install/)
+5. Install the [Kubernetes CLI (kubectl)](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+
+Prefect is tested against Kubernetes 1.26.0 and newer minor versions.
+
+**Administrator Access**
+
+Though not strictly necessary, you may want to ensure you have admin access, both in Prefect Cloud and in your cloud provider. Admin access is only necessary during the initial setup and can be downgraded afterward.
+
+
+Create a cluster
+-------------------------------------------------------
+
+Let's start by creating a new cluster. If you already have one, skip ahead to the next section.
+
+
+
+
+One easy way to get set up with a cluster in EKS is with [`eksctl`](https://eksctl.io/). Node pools can be backed by either EC2 instances or Fargate. Let's choose Fargate so there's less to manage. The following command takes around 15 minutes and must not be interrupted:
+
+```
+# Replace the cluster name with your own value
+eksctl create cluster --fargate --name
+
+# Authenticate to the cluster.
+aws eks update-kubeconfig --name
+
+```
+
+
+
+
+
+You can get a GKE cluster up and running with a few commands using the [`gcloud` CLI](https://cloud.google.com/sdk/docs/install). We'll build a bare-bones cluster that is accessible over the open internet; this should **not** be used in a production environment. To deploy the cluster, your project must have a VPC network configured.
+
+First, authenticate to GCP by setting the following configuration options.
+
+```
+# Authenticate to gcloud
+gcloud auth login
+
+# Specify the project & zone to deploy the cluster to
+# Replace the project name with your GCP project name
+gcloud config set project
+gcloud config set compute/zone
+
+```
+
+
+Next, deploy the cluster; this command will take ~15 minutes to complete. Once the cluster has been created, authenticate to the cluster. 
+
+```
+# Create cluster
+# Replace the cluster name with your own value
+gcloud container clusters create --num-nodes=1 \
+--machine-type=n1-standard-2
+
+# Authenticate to the cluster
+gcloud container clusters get-credentials --region
+
+```
+
+
+**GCP Gotchas**
+
+
+* You'll need to enable the default service account in the IAM console, or specify a different service account with the appropriate permissions to be used.
+
+```
+ERROR: (gcloud.container.clusters.create) ResponseError: code=400, message=Service account "000000000000-compute@developer.gserviceaccount.com" is disabled.
+
+```
+
+
+* Organization policy blocks creation of external (public) IPs. You can override this policy (if you have the appropriate permissions) under the `Organizational Policy` page within IAM.
+
+```
+creation failed: Constraint constraints/compute.vmExternalIpAccess violated for project 000000000000. Add instance projects//zones/us-east1-b/instances/gke-gke-guide-1-default-pool-c369c84d-wcfl to the constraint to use external IP with it."
+
+```
+
+
+
+
+You can quickly create an AKS cluster using the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/get-started-with-azure-cli), or use the Cloud Shell directly from the Azure portal at [shell.azure.com](https://shell.azure.com/).
+
+First, authenticate to Azure if you have not already done so.
+
+Next, deploy the cluster; this command will take ~4 minutes to complete. Once the cluster has been created, authenticate to the cluster.
+
+```
+ # Create a Resource Group at the desired location, e.g. westus
+ az group create --name --location
+
+ # Create a Kubernetes cluster with default Kubernetes version, default SKU load balancer (Standard) and default VM set type (VirtualMachineScaleSets)
+ az aks create --resource-group --name
+
+ # Configure kubectl to connect to your Kubernetes cluster
+ az aks get-credentials --resource-group --name
+
+ # Verify the connection by listing the cluster nodes
+ kubectl get nodes
+
+```
+
+
+
+
+
+Create a container registry
+-----------------------------------------------------------------------------
+
+Besides a cluster, the other critical resource we'll need is a container registry. A registry is not strictly required, but in most cases you'll want to use custom images and/or have more control over where images are stored. If you already have a registry, skip ahead to the next section.
+
+
+
+Let's create a registry using the AWS CLI and authenticate the Docker daemon to that registry:
+
+```
+# Replace the image name with your own value
+aws ecr create-repository --repository-name
+
+# Login to ECR
+# Replace the region and account ID with your own values
+aws ecr get-login-password --region | docker login \
+  --username AWS --password-stdin .dkr.ecr..amazonaws.com
+
+```
+
+
+
+
+Let's create a registry using the gcloud CLI and authenticate the Docker daemon to that registry:
+
+```
+# Create artifact registry repository to host your custom image
+# Replace the repository name with your own value; it can be the
+# same name as your image
+gcloud artifacts repositories create \
+--repository-format=docker --location=us
+
+# Authenticate to artifact registry
+gcloud auth configure-docker us-docker.pkg.dev
+
+```
+
+
+Let's create a registry using the Azure CLI and authenticate the Docker daemon to that registry:
+
+```
+# Name must be a lower-case alphanumeric
+# Tier SKU can easily be updated later, e.g. 
az acr update --name --sku Standard
+az acr create --resource-group \
+  --name \
+  --sku Basic
+
+# Attach ACR to AKS cluster
+# You need Owner, Account Administrator, or Co-Administrator role on your Azure subscription as per Azure docs
+az aks update --resource-group --name --attach-acr
+
+# You can verify AKS can now reach ACR
+az aks check-acr --resource-group --name --acr .azurecr.io
+
+```
+
+
+
+
+
+Create a Kubernetes work pool
+---------------------------------------------------------------------------------
+
+[Work pools](https://docs.prefect.io/concepts/work-pools/) allow you to manage deployment infrastructure. We'll configure the default values for our Kubernetes base job template. Note that these values can be overridden by individual deployments.
+
+Let's switch to the Prefect Cloud UI, where we'll create a new Kubernetes work pool (alternatively, you could use the Prefect CLI to create a work pool).
+
+1. Click on the **Work Pools** tab on the left sidebar
+2. Click the **+** button at the top of the page
+3. Select **Kubernetes** as the work pool type
+4. Click **Next** to configure the work pool settings
+
+Let's look at a few popular configuration options.
+
+**Environment Variables**
+
+Add environment variables to set when starting a flow run. So long as you are using a Prefect-maintained image and haven't overwritten the image's entrypoint, you can specify Python packages to install at runtime with `{"EXTRA_PIP_PACKAGES":"my_package"}`. For example, `{"EXTRA_PIP_PACKAGES":"pandas==1.2.3"}` will install pandas version 1.2.3. Alternatively, you can specify package installation in a custom Dockerfile, which can allow you to take advantage of image caching. As we'll see below, Prefect can help us create a Dockerfile with our flow code and the packages specified in a `requirements.txt` file baked in.
+
+**Namespace**
+
+Set the Kubernetes namespace to create jobs within, such as `prefect`. If not set, the namespace defaults to **default**.
+
+**Image**
+
+Specify the Docker container image for created jobs. If not set, the latest Prefect 2 image will be used (i.e. `prefecthq/prefect:2-latest`). Note that you can override this on each deployment through `job_variables`.
+
+**Image Pull Policy**
+
+Select from the dropdown options to specify when to pull the image. When using the `IfNotPresent` policy, make sure to use unique image tags, as otherwise old images could get cached on your nodes.
+
+**Finished Job TTL**
+
+Number of seconds before finished jobs are automatically cleaned up by Kubernetes' controller. You may want to set this to 60 so that completed flow runs are cleaned up after a minute.
+
+**Pod Watch Timeout Seconds**
+
+Number of seconds for pod creation to complete before timing out. Consider setting this to 300, especially if using a **serverless** type node pool, as these tend to have longer startup times.
+
+**Kubernetes Cluster Config**
+
+You can configure the Kubernetes cluster to use for job creation by specifying a `KubernetesClusterConfig` block. Generally you should leave the cluster config blank, as the worker should be provisioned with appropriate access and permissions. Typically this setting is used when a worker is deployed to a cluster that is different from the cluster where flow runs are executed.
+
+
+### **Advanced Settings**
+
+
+Want to modify the default base job template to add other fields or delete existing fields?
+
+Select the **Advanced** tab and edit the JSON representation of the base job template. 
+
+For example, to set a CPU request, add the following section under variables:
+
+```
+"cpu_request": {
+  "title": "CPU Request",
+  "description": "The CPU allocation to request for this pod.",
+  "default": "default",
+  "type": "string"
+},
+
+```
+
+
+Next add the following to the first `containers` item under `job_configuration`:
+
+```
+...
+"containers": [
+  {
+    ...,
+    "resources": {
+      "requests": {
+        "cpu": "{{ cpu_request }}"
+      }
+    }
+  }
+],
+...
+
+```
+
+
+Running deployments with this work pool will now request the specified CPU.
+
+After configuring the work pool settings, move to the next screen.
+
+Give the work pool a name and save.
+
+Our new Kubernetes work pool should now appear in the list of work pools.
+
+Create a Prefect Cloud API key
+-----------------------------------------------------------------------------------
+
+While in the Prefect Cloud UI, create a Prefect Cloud API key if you don't already have one. Click your profile avatar picture, then click your name to go to your profile settings. Click [API Keys](https://app.prefect.cloud/my/api-keys) and hit the plus button to create a new API key. Make sure to store it safely along with your other passwords, ideally via a password manager.
+
+Deploy a worker using Helm
+---------------------------------------------------------------------------
+
+With our cluster and work pool created, it's time to deploy a worker, which will set up Kubernetes infrastructure to run our flows. The best way to deploy a worker is using the [Prefect Helm Chart](https://github.com/PrefectHQ/prefect-helm/tree/main/charts/prefect-worker).
+
+### Add the Prefect Helm repository
+
+Add the Prefect Helm repository to your Helm client:
+
+```
+helm repo add prefect https://prefecthq.github.io/prefect-helm
+helm repo update
+
+```
+
+
+### Create a namespace
+
+Create a new namespace in your Kubernetes cluster to deploy the Prefect worker:
+
+```
+kubectl create namespace prefect
+
+```
+
+
+### Create a Kubernetes secret for the Prefect API key
+
+```
+kubectl create secret generic prefect-api-key \
+--namespace=prefect --from-literal=key=your-prefect-cloud-api-key
+
+```
+
+
+### Configure Helm chart values
+
+Create a `values.yaml` file to customize the Prefect worker configuration. Add the following contents to the file:
+
+```
+worker:
+  cloudApiConfig:
+    accountId:
+    workspaceId:
+  config:
+    workPool:
+
+```
+
+
+These settings will ensure that the worker connects to the proper account, workspace, and work pool.
+
+View your Account ID and Workspace ID in your browser URL when logged into Prefect Cloud. For example: [https://app.prefect.cloud/account/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here](https://app.prefect.cloud/account/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here).
+
+### Create a Helm release
+
+Let's install the Prefect worker using the Helm chart with your custom `values.yaml` file:
+
+```
+helm install prefect-worker prefect/prefect-worker \
+  --namespace=prefect \
+  -f values.yaml
+
+```
+
+
+### Verify deployment
+
+Check the status of your Prefect worker deployment:
+
+```
+kubectl get pods -n prefect
+
+```
+
+
+Define a flow
+-------------------------------------------------
+
+Let's start simple with a flow that just logs a message. 
In a directory named `flows`, create a file named `hello.py` with the following contents:
+
+```
+from prefect import flow, get_run_logger, tags
+
+@flow
+def hello(name: str = "Marvin"):
+    logger = get_run_logger()
+    logger.info(f"Hello, {name}!")
+
+if __name__ == "__main__":
+    with tags("local"):
+        hello()
+
+```
+
+
+Run the flow locally with `python hello.py` to verify that it works. Note that we use the `tags` context manager to tag the flow run as `local`. This step is not required, but does add some helpful metadata.
+
+Define a Prefect deployment
+-----------------------------------------------------------------------------
+
+Prefect has two recommended options for creating a deployment with dynamic infrastructure. You can define a deployment in a Python script using the `flow.deploy` mechanics or in a `prefect.yaml` definition file. The `prefect.yaml` file currently allows for more customization in terms of push and pull steps. Kubernetes objects are defined in YAML, so we expect many teams using Kubernetes work pools to create their deployments with YAML as well. To learn about the Python deployment creation method with `flow.deploy`, refer to the [Workers & Work Pools tutorial page](https://docs.prefect.io/tutorial/workers/).
+
+The [`prefect.yaml`](https://docs.prefect.io/concepts/deployments/#managing-deployments) file is used by the `prefect deploy` command to deploy our flows. As part of that process, it will also build and push our image. Create a new file named `prefect.yaml` with the following contents:
+
+```
+# Generic metadata about this project
+name: flows
+prefect-version: 2.13.8
+
+# build section allows you to manage and build docker images
+build:
+- prefect_docker.deployments.steps.build_docker_image:
+    id: build-image
+    requires: prefect-docker>=0.4.0
+    image_name: "{{ $PREFECT_IMAGE_NAME }}"
+    tag: latest
+    dockerfile: auto
+    platform: "linux/amd64"
+
+# push section allows you to manage if and how this project is uploaded to remote locations
+push:
+- prefect_docker.deployments.steps.push_docker_image:
+    requires: prefect-docker>=0.4.0
+    image_name: "{{ build-image.image_name }}"
+    tag: "{{ build-image.tag }}"
+
+# pull section allows you to provide instructions for cloning this project in remote locations
+pull:
+- prefect.deployments.steps.set_working_directory:
+    directory: /opt/prefect/flows
+
+# the definitions section allows you to define reusable components for your deployments
+definitions:
+  tags: &common_tags
+    - "eks"
+  work_pool: &common_work_pool
+    name: "kubernetes"
+    job_variables:
+      image: "{{ build-image.image }}"
+
+# the deployments section allows you to provide configuration for deploying flows
+deployments:
+- name: "default"
+  tags: *common_tags
+  schedule: null
+  entrypoint: "flows/hello.py:hello"
+  work_pool: *common_work_pool
+
+- name: "arthur"
+  tags: *common_tags
+  schedule: null
+  entrypoint: "flows/hello.py:hello"
+  parameters:
+    name: "Arthur"
+  work_pool: *common_work_pool
+
+```
+
+
+We define two deployments of the `hello` flow: `default` and `arthur`. Note that by specifying `dockerfile: auto`, Prefect will automatically create a Dockerfile that installs any `requirements.txt` and copies over the current directory. You can pass a custom Dockerfile instead with `dockerfile: Dockerfile` or `dockerfile: path/to/Dockerfile`. Also note that we are specifically building for the `linux/amd64` platform. This specification is often necessary when images are built on Macs with M series chips but run on cloud provider instances.
+
+**Deployment-specific build, push, and pull**
+
+The build, push, and pull steps can be overridden for each deployment. This allows for more custom behavior, such as specifying a different image for each deployment.
+
+
+Let's make sure we define our requirements in a `requirements.txt` file:
+
+```
+prefect>=2.13.8
+prefect-docker>=0.4.0
+prefect-kubernetes>=0.3.1
+
+```
+
+
+The directory should now look something like this:
+
+```
+.
+├── prefect.yaml
+└── flows
+    ├── requirements.txt
+    └── hello.py
+
+```
+
+
+### Tag images with a Git SHA
+
+If your code is stored in a GitHub repository, it's good practice to tag your images with the Git SHA of the code used to build it. This can be done in the `prefect.yaml` file with a few minor modifications, and isn't yet an option with the Python deployment creation method. Let's use the `run_shell_script` command to grab the SHA and pass it to the `tag` parameter of `build_docker_image`:
+
+```
+build:
+- prefect.deployments.steps.run_shell_script:
+    id: get-commit-hash
+    script: git rev-parse --short HEAD
+    stream_output: false
+- prefect_docker.deployments.steps.build_docker_image:
+    id: build-image
+    requires: prefect-docker>=0.4.0
+    image_name: "{{ $PREFECT_IMAGE_NAME }}"
+    tag: "{{ get-commit-hash.stdout }}"
+    dockerfile: auto
+    platform: "linux/amd64"
+
+```
+
+
+Let's also set the SHA as a tag for easy identification in the UI:
+
+```
+definitions:
+  tags: &common_tags
+    - "eks"
+    - "{{ get-commit-hash.stdout }}"
+  work_pool: &common_work_pool
+    name: "kubernetes"
+    job_variables:
+      image: "{{ build-image.image }}"
+
+```
+
+
+Authenticate to Prefect
+---------------------------------------------------------------------
+
+Before we deploy the flows to Prefect, we will need to authenticate via the Prefect CLI. We will also need to ensure that all of our flow's dependencies are present at `deploy` time.
+
+This example uses a virtual environment to ensure consistency across environments.
+
+```
+# Create a virtualenv & activate it
+virtualenv prefect-demo
+source prefect-demo/bin/activate
+
+# Install dependencies of your flow
+prefect-demo/bin/pip install -r requirements.txt
+
+# Authenticate to Prefect & select the appropriate
+# workspace to deploy your flows to
+prefect-demo/bin/prefect cloud login
+
+```
+
+
+Deploy the flows
+-------------------------------------------------------
+
+Now we're ready to deploy our flows, which will build our images. The image name determines which registry it will end up in. We have configured our `prefect.yaml` file to get the image name from the `PREFECT_IMAGE_NAME` environment variable, so let's set that first:
+
+
+
+```
+export PREFECT_IMAGE_NAME=.dkr.ecr..amazonaws.com/
+
+```
+
+
+
+```
+export PREFECT_IMAGE_NAME=us-docker.pkg.dev///
+
+```
+
+
+```
+export PREFECT_IMAGE_NAME=.azurecr.io/
+
+```
+
+
+
+
+
+To deploy your flows, ensure your Docker daemon is running first. Deploy all the flows with `prefect deploy --all` or deploy them individually by name: `prefect deploy -n hello/default` or `prefect deploy -n hello/arthur`.
+
+Run the flows
+-------------------------------------------------
+
+Once the deployments are successfully created, we can run them from the UI or the CLI:
+
+```
+prefect deployment run hello/default
+prefect deployment run hello/arthur
+
+```
+
+
+Congratulations! You just ran two deployments in Kubernetes. Head over to the UI to check their status!
\ No newline at end of file
diff --git a/docs/2.19.x/how-to-guides/work-pools/serverless-push-work.mdx b/docs/2.19.x/how-to-guides/work-pools/serverless-push-work.mdx
new file mode 100644
index 000000000000..7fa39a7e44f6
--- /dev/null
+++ b/docs/2.19.x/how-to-guides/work-pools/serverless-push-work.mdx
@@ -0,0 +1,639 @@
+---
+sidebarTitle: Serverless Push Work Pools
+title: Push Work to Serverless Computing Infrastructure
+---
+
+
+Push [work pools](https://docs.prefect.io/concepts/work-pools/#work-pool-overview) are a special type of work pool that allows Prefect Cloud to submit flow runs for execution to serverless computing infrastructure without running a worker. Push work pools currently support execution in AWS ECS tasks, Azure Container Instances, Google Cloud Run jobs, and Modal.
+
+In this guide you will:
+
+* Create a push work pool that sends work to Amazon Elastic Container Service (AWS ECS), Azure Container Instances (ACI), Google Cloud Run, or Modal
+* Deploy a flow to that work pool
+* Execute a flow without having to run a worker or agent process to poll for flow runs
+
+You can automatically provision infrastructure and create your push work pool using the `prefect work-pool create` CLI command with the `--provision-infra` flag. This approach greatly simplifies the setup process.
+
+Let's explore automatic infrastructure provisioning for push work pools first, and then we'll cover how to manually set up your push work pool.
+
+Automatic infrastructure provisioning
+-------------------------------------------------------------------------------------------------
+
+With Prefect Cloud you can provision infrastructure for use with an AWS ECS, Google Cloud Run, or ACI push work pool. Push work pools in Prefect Cloud simplify the setup and management of the infrastructure necessary to run your flows. However, setting up infrastructure on your cloud provider can still be a time-consuming process. Prefect can dramatically simplify this process by automatically provisioning the necessary infrastructure for you.
+
+We'll use the `prefect work-pool create` CLI command with the `--provision-infra` flag to automatically provision your serverless cloud resources and set up your Prefect workspace to use a new push pool.
+
+### Prerequisites
+
+To use automatic infrastructure provisioning, you'll need to have the relevant cloud CLI library installed and to have authenticated with your cloud provider.
+
+
+Install the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), [authenticate with your AWS account](https://docs.aws.amazon.com/signin/latest/userguide/command-line-sign-in.html), and [set a default region](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods).
+
+If you already have the AWS CLI installed, be sure to [update to the latest version](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions).
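+
+If you're not sure whether the CLI is already authenticated and configured, a quick sanity check with standard AWS CLI commands looks like this:
+
+```
+# Show the identity the CLI is currently authenticated as
+aws sts get-caller-identity
+
+# Show the default region the CLI will use
+aws configure get region
+
+```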
+ +You will need the following permissions in your authenticated AWS account: + +IAM Permissions: + +* iam:CreatePolicy +* iam:GetPolicy +* iam:ListPolicies +* iam:CreateUser +* iam:GetUser +* iam:AttachUserPolicy +* iam:CreateRole +* iam:GetRole +* iam:AttachRolePolicy +* iam:ListRoles +* iam:PassRole + +Amazon ECS Permissions: + +* ecs:CreateCluster +* ecs:DescribeClusters + +Amazon EC2 Permissions: + +* ec2:CreateVpc +* ec2:DescribeVpcs +* ec2:CreateInternetGateway +* ec2:AttachInternetGateway +* ec2:CreateRouteTable +* ec2:CreateRoute +* ec2:CreateSecurityGroup +* ec2:DescribeSubnets +* ec2:CreateSubnet +* ec2:DescribeAvailabilityZones +* ec2:AuthorizeSecurityGroupIngress +* ec2:AuthorizeSecurityGroupEgress + +Amazon ECR Permissions: + +* ecr:CreateRepository +* ecr:DescribeRepositories +* ecr:GetAuthorizationToken + +If you want to use AWS managed policies, you can use the following: + +* AmazonECS\_FullAccess +* AmazonEC2FullAccess +* IAMFullAccess +* AmazonEC2ContainerRegistryFullAccess + +Note that the above policies will give you all the permissions needed, but are more permissive than necessary. + +Docker is also required to build and push images to your registry. You can install Docker [here](https://docs.docker.com/get-docker/). + + +Install the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) and [authenticate with your Azure account](https://learn.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + +If you already have the Azure CLI installed, be sure to update to the latest version with `az upgrade`. + +You will also need the following roles in your Azure subscription: + +* Contributor +* User Access Administrator +* Application Administrator +* Managed Identity Operator +* Azure Container Registry Contributor + +Docker is also required to build and push images to your registry. You can install Docker [here](https://docs.docker.com/get-docker/). + + +Install the [gcloud CLI](https://cloud.google.com/sdk/docs/install) and [authenticate with your GCP project](https://cloud.google.com/docs/authentication/gcloud). + +If you already have the gcloud CLI installed, be sure to update to the latest version with `gcloud components update`. + +You will also need the following permissions in your GCP project: + +* resourcemanager.projects.list +* serviceusage.services.enable +* iam.serviceAccounts.create +* iam.serviceAccountKeys.create +* resourcemanager.projects.setIamPolicy +* artifactregistry.repositories.create + +Docker is also required to build and push images to your registry. You can install Docker [here](https://docs.docker.com/get-docker/). + + +Install `modal` by running: +``` +pip install modal +``` + +Create a Modal API token by running: +``` +modal token new + +``` + +### Automatically creating a new push work pool and provisioning infrastructure + +Here's the command to create a new push work pool and configure the necessary infrastructure. + +``` +prefect work-pool create --type ecs:push --provision-infra my-ecs-pool + +``` + + +Using the `--provision-infra` flag will automatically set up your default AWS account to be ready to execute flows via ECS tasks. In your AWS account, this command will create a new IAM user, IAM policy, ECS cluster that uses AWS Fargate, VPC, and ECR repository if they don't already exist. In your Prefect workspace, this command will create an [`AWSCredentials` block](https://prefecthq.github.io/prefect-aws/credentials/) for storing the generated credentials.
+ +Here's an abbreviated example output from running the command: + +``` +╭───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool my-ecs-pool will require: │ +│ │ +│ - Creating an IAM user for managing ECS tasks: prefect-ecs-user │ +│ - Creating and attaching an IAM policy for managing ECS tasks: prefect-ecs-policy │ +│ - Storing generated AWS credentials in a block │ +│ - Creating an ECS cluster for running Prefect flows: prefect-ecs-cluster │ +│ - Creating a VPC with CIDR 172.31.0.0/16 for running ECS tasks: prefect-ecs-vpc │ +│ - Creating an ECR repository for storing Prefect images: prefect-flows │ +╰───────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? [y/n]: y +Provisioning IAM user +Creating IAM policy +Generating AWS credentials +Creating AWS credentials block +Provisioning ECS cluster +Provisioning VPC +Creating internet gateway +Setting up subnets +Setting up security group +Provisioning ECR repository +Authenticating with ECR +Setting default Docker build namespace +Provisioning Infrastructure ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 +Infrastructure successfully provisioned! +Created work pool 'my-ecs-pool'! + +``` + + +Default Docker build namespace + +After infrastructure provisioning completes, you will be logged into your new ECR repository and the default Docker build namespace will be set to the URL of the registry. + +While the default namespace is set, you will not need to provide the registry URL when building images as part of your deployment process. + +To take advantage of this, you can write your deploy scripts like this: + +```python example_deploy_script.py +from prefect import flow +from prefect.deployments import DeploymentImage + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow running in an ECS task!") + + +if __name__ == "__main__": + my_flow.deploy( + name="my-deployment", + work_pool_name="my-work-pool", + image=DeploymentImage( + name="my-repository:latest", + platform="linux/amd64", + ) + ) + +``` + + +This will build an image with the tag `<ECR-registry-URL>/my-repository:latest` and push it to the registry. + +Your image name will need to match the name of the repository created with your work pool. You can create new repositories in the ECR console. + + +``` +prefect work-pool create --type azure-container-instance:push --provision-infra my-aci-pool + +``` + + +Using the `--provision-infra` flag will automatically set up your default Azure account to be ready to execute flows via Azure Container Instances. In your Azure account, this command will create a resource group, an app registration, and a service account with the necessary permissions, generate a secret for the app registration, and create an Azure Container Registry, if they don't already exist. In your Prefect workspace, this command will create an [`AzureContainerInstanceCredentials` block](https://prefecthq.github.io/prefect-azure/credentials/#prefect_azure.credentials.AzureContainerInstanceCredentials) for storing the client secret value from the generated secret.
+ +Here's an abbreviated example output from running the command: + +``` +╭───────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool my-aci-work-pool will require: │ +│ │ +│ Updates in subscription Azure subscription 1 │ +│ │ +│ - Create a resource group in location eastus │ +│ - Create an app registration in Azure AD prefect-aci-push-pool-app │ +│ - Create/use a service principal for app registration │ +│ - Generate a secret for app registration │ +│ - Create an Azure Container Registry with prefix prefect │ +│ - Create an identity prefect-acr-identity to allow access to the created registry │ +│ - Assign Contributor role to service account │ +│ - Create an ACR registry for image hosting │ +│ - Create an identity for Azure Container Instance to allow access to the registry │ +│ │ +│ Updates in Prefect workspace │ +│ │ +│ - Create Azure Container Instance credentials block aci-push-pool-credentials │ +│ │ +╰───────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? [y/n]: +Creating resource group +Creating app registration +Generating secret for app registration +Creating ACI credentials block +ACI credentials block 'aci-push-pool-credentials' created in Prefect Cloud +Assigning Contributor role to service account +Creating Azure Container Registry +Creating identity +Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 +Infrastructure successfully provisioned for 'my-aci-work-pool' work pool! +Created work pool 'my-aci-work-pool'! + +``` + + +Default Docker build namespace + +After infrastructure provisioning completes, you will be logged into your new Azure Container Registry and the default Docker build namespace will be set to the URL of the registry. + +While the default namespace is set, any images you build without specifying a registry or username/organization will be pushed to the registry. + +To take advantage of this functionality, you can write your deploy scripts like this: + + +```python example_deploy_script.py + +from prefect import flow +from prefect.deployments import DeploymentImage + + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow running on an Azure Container Instance!") + + +if __name__ == "__main__": + my_flow.deploy( + name="my-deployment", + work_pool_name="my-work-pool", + image=DeploymentImage( + name="my-image:latest", + platform="linux/amd64", + ) + ) + +``` + + +This will build an image with the tag `<ACR-registry-URL>/my-image:latest` and push it to the registry. + + + +``` +prefect work-pool create --type cloud-run:push --provision-infra my-cloud-run-pool + +``` + + +Using the `--provision-infra` flag will allow you to select a GCP project to use for your work pool and automatically configure it to be ready to execute flows via Cloud Run. In your GCP project, this command will activate the Cloud Run API, create a service account, and create a key for the service account, if they don't already exist. In your Prefect workspace, this command will create a [`GCPCredentials` block](https://prefecthq.github.io/prefect-gcp/credentials/) for storing the service account key.
+ +Here's an abbreviated example output from running the command: + +``` +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool my-cloud-run-pool will require: │ +│ │ +│ Updates in GCP project central-kit-405415 in region us-central1 │ +│ │ +│ - Activate the Cloud Run API for your project │ +│ - Activate the Artifact Registry API for your project │ +│ - Create an Artifact Registry repository named prefect-images │ +│ - Create a service account for managing Cloud Run jobs: prefect-cloud-run │ +│ - Service account will be granted the following roles: │ +│ - Service Account User │ +│ - Cloud Run Developer │ +│ - Create a key for service account prefect-cloud-run │ +│ │ +│ Updates in Prefect workspace │ +│ │ +│ - Create GCP credentials block my--pool-push-pool-credentials to store the service account key │ +│ │ +╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? [y/n]: y +Activating Cloud Run API +Activating Artifact Registry API +Creating Artifact Registry repository +Configuring authentication to Artifact Registry +Setting default Docker build namespace +Creating service account +Assigning roles to service account +Creating service account key +Creating GCP credentials block +Provisioning Infrastructure ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 +Infrastructure successfully provisioned! +Created work pool 'my-cloud-run-pool'! + +``` + + +Default Docker build namespace + +After infrastructure provisioning completes, you will be logged into your new Artifact Registry repository and the default Docker build namespace will be set to the URL of the repository. + +While the default namespace is set, any images you build without specifying a registry or username/organization will be pushed to the repository. + +To take advantage of this functionality, you can write your deploy scripts like this: + +```python example_deploy_script.py +from prefect import flow +from prefect.deployments import DeploymentImage + + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow running on Cloud Run!") + + +if __name__ == "__main__": + my_flow.deploy( + name="my-deployment", + work_pool_name="above-ground", + image=DeploymentImage( + name="my-image:latest", + platform="linux/amd64", + ) + ) + +``` + + +This will build an image with the tag `<REGION>-docker.pkg.dev/<GCP_PROJECT_ID>/<REPOSITORY_NAME>/my-image:latest` and push it to the repository. + + + +``` +prefect work-pool create --type modal:push --provision-infra my-modal-pool + +``` + + +Using the `--provision-infra` flag will trigger the creation of a `ModalCredentials` block in your Prefect Cloud workspace. This block will store your Modal API token, which is used to authenticate with Modal's API. By default, the token for your current Modal profile will be used for the new `ModalCredentials` block. If Prefect is unable to discover a Modal API token for your current profile, you will be prompted to create a new one. + +That's it! You're ready to create and schedule deployments that use your new push work pool. Remember that no worker is needed to run flows with a push work pool. + +### Using existing resources with automatic infrastructure provisioning + +If you already have the necessary infrastructure set up, Prefect will detect this upon work pool creation and skip provisioning for those resources.
+ +For example, here's how `prefect work-pool create my-work-pool --provision-infra` looks when existing Azure resources are detected: + +``` +Proceed with infrastructure provisioning? [y/n]: y +Creating resource group +Resource group 'prefect-aci-push-pool-rg' already exists in location 'eastus'. +Creating app registration +App registration 'prefect-aci-push-pool-app' already exists. +Generating secret for app registration +Provisioning infrastructure +ACI credentials block 'bb-push-pool-credentials' created +Assigning Contributor role to service account... +Service principal with object ID '4be6fed7-...' already has the 'Contributor' role assigned in +'/subscriptions/.../' +Creating Azure Container Instance +Container instance 'prefect-aci-push-pool-container' already exists. +Creating Azure Container Instance credentials block +Provisioning infrastructure... ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 +Infrastructure successfully provisioned! +Created work pool 'my-work-pool'! + +``` + + +Provisioning infrastructure for an existing push work pool +------------------------------------------------------------------------------------------------------------------------------------------- + +If you already have a push work pool set up, but haven't configured the necessary infrastructure, you can use the `provision-infra` sub-command to provision the infrastructure for that work pool. For example, you can run the following command if you have a work pool named "my-work-pool": + +``` +prefect work-pool provision-infra my-work-pool + +``` + + +Prefect will create the necessary infrastructure for the `my-work-pool` work pool and provide you with a summary of the changes to be made: + +``` +╭────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool my-work-pool will require: │ +│ │ +│ Updates in subscription Azure subscription 1 │ +│ │ +│ - Create a resource group in location eastus │ +│ - Create an app registration in Azure AD prefect-aci-push-pool-app │ +│ - Create/use a service principal for app registration │ +│ - Generate a secret for app registration │ +│ - Assign Contributor role to service account │ +│ - Create Azure Container Instance 'aci-push-pool-container' in resource group prefect-aci-push-pool-rg │ +│ │ +│ Updates in Prefect workspace │ +│ │ +│ - Create Azure Container Instance credentials block aci-push-pool-credentials │ +│ │ +╰────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? [y/n]: y + +``` + + +This command can speed up your infrastructure setup process. + +As with the examples above, you will need to have the related cloud CLI library installed and be authenticated with your cloud provider. + +Manual infrastructure provisioning +------------------------------------------------------------------------------------------- + +If you prefer to set up your infrastructure manually, don't include the `--provision-infra` flag in the CLI command. In the examples below, we'll create a push work pool via the Prefect Cloud UI. + + +To push work to ECS, AWS credentials are required. + +Create a user and attach the _AmazonECS\_FullAccess_ policy. + +From that user's page, create credentials and store them somewhere safe for use in the next section. + + +To push work to Azure, an Azure subscription, resource group and tenant secret are required.
+ +**Create Subscription and Resource Group** + +1. In the Azure portal, create a subscription. +2. Create a resource group within your subscription. + +**Create App Registration** + +1. In the Azure portal, create an app registration. +2. In the app registration, create a client secret. Copy the value and store it somewhere safe. + +**Add App Registration to Resource Group** + +1. Navigate to the resource group you created earlier. +2. Choose the "Access control (IAM)" blade in the left-hand side menu. Click the "+ Add" button at the top, then "Add role assignment". +3. Go to the "Privileged administrator roles" tab, click on "Contributor", then click "Next" at the bottom of the page. +4. Click on "+ Select members". Type the name of the app registration (otherwise it may not autopopulate) and click to add it. Then hit "Select" and click "Next". Note that the default permissions associated with a role like "Contributor" might not always be sufficient for all operations related to Azure Container Instances (ACI). The specific permissions required can depend on the operations you need to perform (like creating, running, and deleting ACI container groups) and your organization's security policies. In some cases, additional permissions or custom roles might be necessary. +5. Click "Review + assign" to finish. + + +A GCP service account and an API key are required to push work to Cloud Run. + +Create a service account by navigating to the service accounts page and clicking _Create_. Name and describe your service account, and click _continue_ to configure permissions. + +The service account must have two roles at a minimum: _Cloud Run Developer_ and _Service Account User_. + +![Configuring service account permissions in GCP](https://docs.prefect.io/img/guides/gcr-service-account-setup.png) + +Once the service account is created, navigate to its _Keys_ page to add an API key. Create a JSON type key, download it, and store it somewhere safe for use in the next section. + + +A Modal API token is required to push work to Modal. + +Create a Modal API token by navigating to **Settings** in the Modal UI. In the **API Tokens** section of the Settings page, click **New Token**. + +Copy the token ID and token secret and store them somewhere safe for use in the next section. + + +### Work pool configuration + +Our push work pool will store information about what type of infrastructure our flow will run on, what default values to provide to compute jobs, and other important execution environment parameters. Because our push work pool needs to integrate securely with your serverless infrastructure, we need to start by storing our credentials in Prefect Cloud, which we'll do by making a block. + +### Creating a Credentials block + + +Navigate to the blocks page, click create new block, and select AWS Credentials for the type. + +For use in a push work pool, region, access key, and access key secret must be set. + +Provide any other optional information and create your block. + + +Navigate to the blocks page and click the "+" at the top to create a new block. Find the Azure Container Instance Credentials block and click "Add +". + +Locate the client ID and tenant ID on your app registration and use the client secret you saved earlier. Be sure to use the value of the secret, not the secret ID! + +Provide any other optional information and click "Create". + + +Navigate to the blocks page, click create new block, and select GCP Credentials for the type.
+ +For use in a push work pool, this block must have the contents of the JSON key stored in the Service Account Info field, like this: + +![Configuring GCP Credentials block for use in cloud run push work pools](/images/serverless1.png) + +Provide any other optional information and create your block. + + +Navigate to the blocks page, click create new block, and select Modal Credentials for the type. + +For use in a push work pool, this block must have the token ID and token secret stored in the Token ID and Token Secret fields, respectively. + + +### Creating a push work pool + +Now navigate to the work pools page. Click **Create** to start configuring your push work pool by selecting a push option in the infrastructure type step. + + +Each step has several optional fields that are detailed in the [work pools documentation](https://docs.prefect.io/concepts/work-pools/). Select the block you created under the AWS Credentials field. This will allow Prefect Cloud to securely interact with your ECS cluster. + + +Fill in the subscription ID and resource group name from the resource group you created. +Add the Azure Container Instance Credentials block you created in the step above. + + +Each step has several optional fields that are detailed in the [work pools documentation](https://docs.prefect.io/concepts/work-pools/). Select the block you created under the GCP Credentials field. This will allow Prefect Cloud to securely interact with your GCP project. + + +Each step has several optional fields that are detailed in the [work pools documentation](https://docs.prefect.io/concepts/work-pools/). Select the block you created under the Modal Credentials field. This will allow Prefect Cloud to securely interact with your Modal account. + + +Create your pool and you are ready to deploy flows to your push work pool. + +Deployment +------------------------------------------- + +Deployment details are described in the deployments [concept section](https://docs.prefect.io/concepts/deployments/). Your deployment needs to be configured to send flow runs to our push work pool. For example, if you create a deployment through the interactive command line experience, choose the work pool you just created. If you are deploying an existing `prefect.yaml` file, the deployment would contain: + +``` + work_pool: + name: my-push-pool + +``` + + +Deploying your flow to the `my-push-pool` work pool ensures that runs that are ready for execution are submitted immediately, without the need for a worker to poll for them. + +**Serverless infrastructure may require a certain image architecture** + +Note that serverless infrastructure may assume a certain Docker image architecture; for example, Google Cloud Run will fail to run images built with `linux/arm64` architecture. If using Prefect to build your image, you can change the image architecture through the `platform` keyword (e.g., `platform="linux/amd64"`). + +Putting it all together +--------------------------------------------------------------------- + +With your deployment created, navigate to its detail page and create a new flow run. You'll see the flow start running without ever having to poll the work pool, because Prefect Cloud securely connected to your serverless infrastructure, created a job, ran the job, and began reporting on its execution.
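+ +For reference, here is a minimal Python sketch of an equivalent deployment, using the `flow.deploy` and `DeploymentImage` patterns shown earlier in this guide (the flow, image name, and entrypoint are illustrative; `my-push-pool` is the push work pool created above): + +```python +from prefect import flow +from prefect.deployments import DeploymentImage + + +@flow(log_prints=True) +def my_flow(name: str = "world"): +    print(f"Hello {name}! I'm a flow running on a push work pool!") + + +if __name__ == "__main__": +    # Deploying to a push work pool means runs are submitted directly to +    # your serverless infrastructure; no worker process is needed. +    my_flow.deploy( +        name="my-deployment", +        work_pool_name="my-push-pool", +        image=DeploymentImage( +            name="my-image:latest", +            platform="linux/amd64",  # many serverless platforms require amd64 images +        ), +    ) + +```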
+![](/images/serverless2.png) + +Next steps +------------------------------------------- + +Learn more about workers and work pools in the [Prefect concept documentation](https://docs.prefect.io/concepts/work-pools/). + +Learn about installing dependencies at runtime or baking them into your Docker image in the [Deploying Flows to Work Pools and Workers guide](https://docs.prefect.io/guides/prefect-deploy/#creating-work-pool-based-deployments-with-deploy). \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/work-pools/serverless-work-pools.mdx b/docs/2.19.x/how-to-guides/work-pools/serverless-work-pools.mdx new file mode 100644 index 000000000000..d7d49c26bc6b --- /dev/null +++ b/docs/2.19.x/how-to-guides/work-pools/serverless-work-pools.mdx @@ -0,0 +1,54 @@ +--- +sidebarTitle: Serverless Work Pools with Workers +title: Run Deployments on Serverless Infrastructure with Prefect Workers +--- + +Prefect provides hybrid work pools for workers to run flows on the serverless platforms of major cloud providers. The following options are available: + +* AWS Elastic Container Service (ECS) +* Azure Container Instances (ACI) +* Google Cloud Run +* Google Cloud Run V2 +* Google Vertex AI + +![Work pool options](/images/serverless-work-pools1.png) + +In this guide you will: + +* Create a work pool that sends work to your chosen serverless infrastructure +* Deploy a flow to that work pool +* Start a worker in your serverless cloud provider that will poll its matched work pool for scheduled runs +* Schedule a deployment run that a worker will pick up from the work pool and run on your serverless infrastructure + +**Push work pools don't require a worker** + +Options for push work pool versions of AWS ECS, Azure Container Instances, and Google Cloud Run that do not require a worker are available with Prefect Cloud. These push work pool options require connection configuration information to be stored on Prefect Cloud. Read more in the [Serverless Push Work Pool Guide](https://docs.prefect.io/guides/deployment/push-work-pools/). + +This is a brief overview of the options to run workflows on serverless infrastructure. For in-depth guides, see the Prefect integration libraries: + +* [AWS ECS guide in the `prefect-aws` docs](https://prefecthq.github.io/prefect-aws/ecs_guide/) +* [Azure Container Instances guide](https://docs.prefect.io/integrations/prefect-azure/aci_worker/) +* [Google Cloud Run guide in the `prefect-gcp` docs](https://prefecthq.github.io/prefect-gcp/gcp-worker-guide/) +* For Google Vertex AI, follow the Cloud Run guide, substituting _Google Vertex AI_ where _Google Cloud Run_ is mentioned. + + +**Choosing between Google Cloud Run and Google Vertex AI** + +Google Vertex AI is well-suited for machine learning model training applications in which GPUs or TPUs and high resource levels are desired. + + +Steps +--------------------------------- + +1. Make sure you have a user or service account on your chosen cloud provider with the necessary permissions to run serverless jobs +2. Create the appropriate serverless work pool (one that uses a worker) in the Prefect UI +3. Create a deployment that references the work pool +4. Start a worker in your chosen serverless cloud provider infrastructure +5. Run the deployment + +Next steps +------------------------------------------- + +Options for push versions of AWS ECS, Azure Container Instances, and Google Cloud Run work pools that do not require a worker are available with Prefect Cloud.
Read more in the [Serverless Push Work Pool Guide](https://docs.prefect.io/guides/deployment/push-work-pools/). + +Learn more about workers and work pools in the [Prefect concept documentation](https://docs.prefect.io/concepts/work-pools/). + +Learn about installing dependencies at runtime or baking them into your Docker image in the [Deploying Flows to Work Pools and Workers guide](https://docs.prefect.io/guides/prefect-deploy/#creating-work-pool-based-deployments-with-deploy). \ No newline at end of file diff --git a/docs/2.19.x/how-to-guides/work-pools/upgrade-from-agents--workers.mdx b/docs/2.19.x/how-to-guides/work-pools/upgrade-from-agents--workers.mdx new file mode 100644 index 000000000000..bd4c3b1b06a7 --- /dev/null +++ b/docs/2.19.x/how-to-guides/work-pools/upgrade-from-agents--workers.mdx @@ -0,0 +1,337 @@ +--- +title: Upgrade from Agents to Workers +--- + +Upgrading from agents to workers significantly enhances the experience of deploying flows. It simplifies the specification of each flow's infrastructure and runtime environment. + +A [worker](https://docs.prefect.io/concepts/work-pools/#worker-overview) is the fusion of an [agent](https://docs.prefect.io/concepts/agents/) with an [infrastructure block](https://docs.prefect.io/concepts/infrastructure/). Like agents, workers poll a work pool for flow runs that are scheduled to start. Like infrastructure blocks, workers are typed - they work with only one kind of infrastructure, and they specify the default configuration for jobs submitted to that infrastructure. + +Accordingly, workers are not a drop-in replacement for agents. **Using workers requires deploying flows differently.** In particular, deploying a flow with a worker does not involve specifying an infrastructure block. Instead, infrastructure configuration is specified on the [work pool](https://docs.prefect.io/concepts/work-pools/) and passed to each worker that polls work from that pool. + +This guide provides an overview of the differences between agents and workers. It also describes how to upgrade from agents to workers in just a few quick steps. + +Enhancements +----------------------------------------------- + +### Workers + +* Improved visibility into the status of each worker, including when a worker was started and when it last polled. +* Better handling of race conditions for high availability use cases. + +### Work pools + +* Work pools allow greater customization and governance of infrastructure parameters for deployments via their [base job template](https://docs.prefect.io/concepts/work-pools/#base-job-template). +* Prefect Cloud [push work pools](https://docs.prefect.io/guides/deployment/push-work-pools/) enable flow execution in your cloud provider environment without needing to host a worker. +* Prefect Cloud [managed work pools](https://docs.prefect.io/guides/managed-execution/) allow you to run flows on Prefect's infrastructure, without needing to host a worker or configure cloud provider infrastructure. + +### Improved deployment interfaces + +* The Python deployment experience with `.deploy()` or the alternative deployment experience with `prefect.yaml` are more flexible and easier to use than block and agent-based deployments. +* Both options allow you to [deploy multiple flows](https://docs.prefect.io/concepts/deployments/#working-with-multiple-deployments) with a single command.
+ +* Both options allow you to build Docker images for your flows to create portable execution environments. +* The YAML-based API supports [templating](https://docs.prefect.io/concepts/deployments/#templating-options) to enable [dryer deployment definitions](https://docs.prefect.io/concepts/deployments/#reusing-configuration-across-deployments). + +* * * + +What's different +------------------------------------------------------ + +1. **Deployment CLI and Python SDK:** + + `prefect deployment build`/`prefect deployment apply` --> [`prefect deploy`](https://docs.prefect.io/concepts/deployments/#deployment-declaration-reference) + + Prefect will now automatically detect flows in your repo and provide a [wizard](https://docs.prefect.io/#step-5-deploy-the-flow) 🧙 to guide you through setting required attributes for your deployments. + + `Deployment.build_from_flow` --> [`flow.deploy`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.Flow.deploy) + +2. **Configuring remote flow code storage:** + + storage blocks --> [pull action](https://docs.prefect.io/concepts/deployments/#the-pull-action) + + When using the YAML-based deployment API, you can configure a pull action in your `prefect.yaml` file to specify how to retrieve flow code for your deployments. You can use configuration from your existing storage blocks to define your pull action [via templating](https://docs.prefect.io/guides/prefect-deploy/#templating-options). + + When using the Python deployment API, you can pass any storage block to the `flow.deploy` method to specify how to retrieve flow code for your deployment. + +3. **Configuring flow run infrastructure:** + + infrastructure blocks --> [typed work pool](https://docs.prefect.io/concepts/work-pools/#worker-types) + + Default infrastructure config is now set on the typed work pool, and can be overwritten by individual deployments. + +4. **Managing multiple deployments:** + + Create and/or update many deployments at once through a [`prefect.yaml`](https://docs.prefect.io/concepts/deployments/#working-with-multiple-deployments) file or use the [`deploy`](https://docs.prefect.io/api-ref/prefect/deployments/runner/#prefect.deployments.runner.deploy) function. + + +What's similar +-------------------------------------------------- + +* Storage blocks can be set as the pull action in a `prefect.yaml` file. +* Infrastructure blocks have configuration fields similar to typed work pools. +* Deployment-level infrastructure overrides operate in much the same way. + + `infra_override` --> [`job_variable`](https://docs.prefect.io/concepts/deployments/#work-pool-fields) + +* The processes for starting an agent and [starting a worker](https://docs.prefect.io/concepts/work-pools/#starting-a-worker) in your environment are virtually identical. + + `prefect agent start --pool <work pool name>` --> `prefect worker start --pool <work pool name>` + + **Worker Helm chart** + + + If you host your agents in a Kubernetes cluster, you can use the [Prefect worker Helm chart](https://github.com/PrefectHQ/prefect-helm/tree/main/charts/prefect-worker) to host workers in your cluster. + + +Upgrade guide +------------------------------------------------- + +If you have existing deployments that use infrastructure blocks, you can quickly upgrade them to be compatible with workers by following these steps: + +1. **[Create a work pool](https://docs.prefect.io/concepts/work-pools/#work-pool-configuration)** + +This new work pool will replace your infrastructure block.
+ +You can use the [`.publish_as_work_pool`](https://docs.prefect.io/api-ref/prefect/infrastructure/#prefect.infrastructure.Infrastructure.publish_as_work_pool) method on any infrastructure block to create a work pool with the same configuration. + +For example, if you have a `KubernetesJob` infrastructure block named 'my-k8s-job', you can create a work pool with the same configuration with this script: + +``` +from prefect.infrastructure import KubernetesJob + +KubernetesJob.load("my-k8s-job").publish_as_work_pool() + +``` + + +Running this script will create a work pool named 'my-k8s-job' with the same configuration as your infrastructure block. + + +**Serving flows** + +If you are using a `Process` infrastructure block and a `LocalFilesystem` storage block (or aren't using an infrastructure and storage block at all), you can use [`flow.serve`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.Flow.deploy) to create a deployment without needing to specify a work pool name or start a worker. + +This is a quick way to create a deployment for a flow and is a great way to manage your deployments if you don't need the dynamic infrastructure creation or configuration offered by workers. + +Check out our [Docker guide](https://docs.prefect.io/guides/docker/) for how to build a served flow into a Docker image and host it in your environment. + + +2. **[Start a worker](https://docs.prefect.io/concepts/work-pools/#starting-a-worker)** + +This worker will replace your agent and poll your new work pool for flow runs to execute. + +``` +prefect worker start -p <work pool name> + +``` + + +3. **Deploy your flows to the new work pool** + +To deploy your flows to the new work pool, you can use `flow.deploy` for a Pythonic deployment experience or `prefect deploy` for a YAML-based deployment experience. + +If you currently use `Deployment.build_from_flow`, we recommend using `flow.deploy`. + +If you currently use `prefect deployment build` and `prefect deployment apply`, we recommend using `prefect deploy`. + +### `flow.deploy` + +If you have a Python script that uses `Deployment.build_from_flow`, you can replace it with `flow.deploy`. + +Most arguments to `Deployment.build_from_flow` can be translated directly to `flow.deploy`, but here are some changes that you may need to make: + +* Replace `infrastructure` with `work_pool_name`. +* If you've used the `.publish_as_work_pool` method on your infrastructure block, use the name of the created work pool. +* Replace `infra_overrides` with `job_variables`. +* Replace `storage` with a call to [`flow.from_source`](https://docs.prefect.io/api-ref/prefect/flows/#prefect.flows.Flow.deploy). +* `flow.from_source` will load your flow from a remote storage location and make it deployable. Your existing storage block can be passed to the `source` argument of `flow.from_source`. + +Below are some examples of how to translate `Deployment.build_from_flow` to `flow.deploy`. + +#### Deploying without any blocks + +If you aren't using any blocks: + +```python +from prefect import flow +from prefect.deployments import Deployment + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow from a Python script!") + +if __name__ == "__main__": + Deployment.build_from_flow( + my_flow, + name="my-deployment", + parameters=dict(name="Marvin"), + ) + +``` + + +You can replace `Deployment.build_from_flow` with `flow.serve`: + +```python +from prefect import flow + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! 
I'm a flow from a Python script!") + +if __name__ == "__main__": + my_flow.serve( + name="my-deployment", + parameters=dict(name="Marvin"), + ) + +``` + + +This will start a process that will serve your flow and execute any flow runs that are scheduled to start. + +#### Deploying using a storage block + +If you currently use a storage block to load your flow code but no infrastructure block: + +```python +from prefect import flow +from prefect.storage import GitHub + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow from a GitHub repo!") + +if __name__ == "__main__": + Deployment.build_from_flow( + my_flow, + name="my-deployment", + storage=GitHub.load("demo-repo"), + parameters=dict(name="Marvin"), + ) + +``` + + +you can use `flow.from_source` to load your flow from the same location and `flow.serve` to create a deployment: + +```python +from prefect import flow +from prefect.storage import GitHub + +if __name__ == "__main__": + flow.from_source( + source=GitHub.load("demo-repo"), + entrypoint="example.py:my_flow" + ).serve( + name="my-deployment", + parameters=dict(name="Marvin"), + ) + +``` + + +This will allow you to execute scheduled flow runs without starting a worker. Additionally, the process serving your flow will regularly check for updates to your flow code and automatically update the flow if it detects any changes to the code. + +#### Deploying using an infrastructure and storage block + +For the code below, we'll need to create a work pool from our infrastructure block and pass it to `flow.deploy` as the `work_pool_name` argument. We'll also need to pass our storage block to `flow.from_source` as the `source` argument. + +```python +from prefect import flow +from prefect.deployments import Deployment +from prefect.filesystems import GitHub +from prefect.infrastructure.kubernetes import KubernetesJob + + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! I'm a flow from a GitHub repo!") + + +if __name__ == "__main__": + Deployment.build_from_flow( + my_flow, + name="my-deployment", + storage=GitHub.load("demo-repo"), + entrypoint="example.py:my_flow", + infrastructure=KubernetesJob.load("my-k8s-job"), + infra_overrides=dict(pull_policy="Never"), + parameters=dict(name="Marvin"), + ) + +``` + + +The equivalent deployment code using `flow.deploy` would look like this: + +```python +from prefect import flow +from prefect.storage import GitHub + +if __name__ == "__main__": + flow.from_source( + source=GitHub.load("demo-repo"), + entrypoint="example.py:my_flow" + ).deploy( + name="my-deployment", + work_pool_name="my-k8s-job", + job_variables=dict(pull_policy="Never"), + parameters=dict(name="Marvin"), + ) + +``` + + +Note that when using `flow.from_source(...).deploy(...)`, the flow you're deploying does not need to be available locally before running your script. + +#### Deploying via a Docker image + +If you currently bake your flow code into a Docker image before deploying, you can use the `image` argument of `flow.deploy` to build a Docker image as part of your deployment process: + +```python +from prefect import flow + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! 
I'm a flow from a Docker image!") + + +if __name__ == "__main__": + my_flow.deploy( + name="my-deployment", + image="my-repo/my-image:latest", + work_pool_name="my-k8s-job", + job_variables=dict(pull_policy="Never"), + parameters=dict(name="Marvin"), + ) + +``` + + +You can skip a `flow.from_source` call when building an image with `flow.deploy`. Prefect will keep track of the flow's source code location in the image and load it from that location when the flow is executed. + +### Using `prefect deploy` + + +**Always run `prefect deploy` commands from the **root** level of your repo!** + + +With agents, you might have had multiple `deployment.yaml` files, but under worker deployment patterns, each repo will have a single `prefect.yaml` file located at the **root** of the repo that contains [deployment configuration](https://docs.prefect.io/concepts/deployments/#working-with-multiple-deployments) for all flows in that repo. + +To set up a new `prefect.yaml` file for your deployments, run the following command from the root level of your repo: + +This will start a wizard that will guide you through setting up your deployment. + + +**For step 4, select `y` on the last prompt to save the configuration for the deployment.** + +Saving the configuration for your deployment will result in a `prefect.yaml` file populated with your first deployment. You can use this YAML file to edit and [define multiple deployments](https://docs.prefect.io/concepts/deployments/#working-with-multiple-deployments) for this repo. + + +You can add more [deployments](https://docs.prefect.io/concepts/deployments/#deployment-declaration-reference) to the `deployments` list in your `prefect.yaml` file and/or by continuing to use the deployment creation wizard. + +For more information on deployments, check out our [in-depth guide for deploying flows to work pools](https://docs.prefect.io/guides/prefect-deploy/). \ No newline at end of file diff --git a/docs/2.19.x/integrations/contributing-integrations.mdx b/docs/2.19.x/integrations/contributing-integrations.mdx new file mode 100644 index 000000000000..2d0b30ce3681 --- /dev/null +++ b/docs/2.19.x/integrations/contributing-integrations.mdx @@ -0,0 +1,91 @@ +--- +sidebarTitle: Contributing Integrations +title: Contribute +--- + +We welcome contributors! You can help contribute blocks and integrations by following these steps. + +Contributing Blocks +------------------------------------------------------------- + +Building your own custom block is simple! + +1. Subclass from `Block`. +2. Add a description alongside an `Attributes` and `Example` section in the docstring. +3. Set a `_logo_url` to point to a relevant image. +4. Create the `pydantic.Field`s of the block with a type annotation, `default` or `default_factory`, and a short description about the field. +5. Define the methods of the block. + +For example, this is how the [Secret block is implemented](https://github.com/PrefectHQ/prefect/blob/main/src/prefect/blocks/system.py#L76-L102): + +```python +from pydantic import Field, SecretStr +from prefect.blocks.core import Block + +class Secret(Block): + """ + A block that represents a secret value. The value stored in this block will be obfuscated when + this block is logged or shown in the UI. + + Attributes: + value: A string value that should be kept secret. 
+ + Example: + ```python + from prefect.blocks.system import Secret + secret_block = Secret.load("BLOCK_NAME") + + # Access the stored secret + secret_block.get() + ``` + """ + + _logo_url = "https://example.com/logo.png" + + value: SecretStr = Field( + default=..., description="A string value that should be kept secret." + ) # ... indicates it's a required field + + def get(self): + return self.value.get_secret_value() + +``` + + +To view in Prefect Cloud or the Prefect server UI, [register the block](https://docs.prefect.io/concepts/blocks/#registering-blocks-for-use-in-the-prefect-ui). + +Contributing Integrations +------------------------------------------------------------- + +Anyone can create and share a Prefect Integration, and we encourage anyone interested in creating one to do so! + +### Generate a project + +To help you get started with your integration, we've created a template that gives you the tools you need to create and publish your integration. + +Use the [Prefect Integration template](https://github.com/PrefectHQ/prefect-collection-template#quickstart) to get started creating an integration with a bootstrapped project! + +### List a project in the Integrations Catalog + +To list your integration in the Prefect Integrations Catalog, submit a PR to the Prefect repository adding a file to the `docs/integrations/catalog` directory with details about your integration. Please use `TEMPLATE.yaml` in that folder as a guide. + +Contribute fixes or enhancements to Integrations +----------------------------------------------------------------------------------------------------------------------- + +If you'd like to help fix an issue or add a feature to any of our Integrations, please [propose changes through a pull request from a fork of the repository](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork). + +1. [Fork the repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#forking-a-repository) +2. [Clone the forked repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#cloning-your-forked-repository) +3. Install the repository and its dependencies: + +``` +pip install -e ".[dev]" +``` + +4. Make desired changes +5. Add tests +6. Add an entry to the Integration's CHANGELOG.md +7. Install `pre-commit` to perform quality checks prior to commit: + +``` +pre-commit install +``` + +8. `git commit`, `git push`, and create a pull request \ No newline at end of file diff --git a/src/prefect/deprecated/__init__.py b/docs/2.19.x/integrations/overview.mdx similarity index 100% rename from src/prefect/deprecated/__init__.py rename to docs/2.19.x/integrations/overview.mdx diff --git a/docs/2.19.x/integrations/using-integrations.mdx b/docs/2.19.x/integrations/using-integrations.mdx new file mode 100644 index 000000000000..8df261baa206 --- /dev/null +++ b/docs/2.19.x/integrations/using-integrations.mdx @@ -0,0 +1,95 @@ +--- +title: Using Integrations +--- + +Installing an Integration +------------------------------------------------------------------------- + +Install the Integration via `pip`. 
+ +For example, to use `prefect-aws`, run `pip install prefect-aws`. + +Registering Blocks from an Integration +--------------------------------------------------------------------------------------------------- + +Once the Prefect Integration is installed, [register the blocks](https://docs.prefect.io/concepts/blocks/#registering-blocks-for-use-in-the-prefect-ui) within the integration to view them in the Prefect Cloud UI: + +For example, to register the blocks available in `prefect-aws`: + +``` +prefect block register -m prefect_aws + +``` + + +**Updating blocks from an integration** + + +If you install an updated Prefect integration that adds fields to a block type, you will need to re-register that block type. + + + +**Loading a block in code** + +To use the `load` method on a Block, you must already have a block document [saved](https://docs.prefect.io/concepts/blocks/#saving-blocks) either through code or through the Prefect UI. + + +Learn more about Blocks [here](https://docs.prefect.io/concepts/blocks)! + +Using Tasks and Flows from an Integration +--------------------------------------------------------------------------------------------------------- + +Integrations also contain pre-built tasks and flows that can be imported and called within your code. + +As an example, to read a secret from AWS Secrets Manager with the `read_secret` task: + +```python +from prefect import flow +from prefect_aws import AwsCredentials +from prefect_aws.secrets_manager import read_secret + +@flow +def connect_to_database(): + aws_credentials = AwsCredentials.load("MY_BLOCK_NAME") + secret_value = read_secret( + secret_name="db_password", + aws_credentials=aws_credentials + ) + + # Use secret_value to connect to a database + +``` + + +Customizing Tasks and Flows from an Integration +--------------------------------------------------------------------------------------------------------------------- + +To customize the settings of a task or flow pre-configured in an integration, use `with_options`: + +```python +from prefect import flow +from prefect_dbt.cloud import DbtCloudCredentials +from prefect_dbt.cloud.jobs import trigger_dbt_cloud_job_run_and_wait_for_completion + +custom_run_dbt_cloud_job = trigger_dbt_cloud_job_run_and_wait_for_completion.with_options( + name="Run My DBT Cloud Job", + retries=2, + retry_delay_seconds=10 +) + +@flow +def run_dbt_job_flow(): + run_result = custom_run_dbt_cloud_job( + dbt_cloud_credentials=DbtCloudCredentials.load("my-dbt-cloud-credentials"), + job_id=1 + ) + +run_dbt_job_flow() + +``` + + +Recipes and Tutorials +----------------------------------------------------------------- + +To learn more about how to use Integrations, check out [Prefect recipes](https://github.com/PrefectHQ/prefect-recipes#diving-deeper-) on GitHub. These recipes provide examples of how Integrations can be used in various scenarios. \ No newline at end of file diff --git a/docs/2.19.x/tutorial/deployments.mdx b/docs/2.19.x/tutorial/deployments.mdx new file mode 100644 index 000000000000..b6dc91f42deb --- /dev/null +++ b/docs/2.19.x/tutorial/deployments.mdx @@ -0,0 +1,173 @@ +--- +title: Deploying Flows +sidebarTitle: Deployments +--- + +**Reminder to connect to Prefect Cloud or a self-hosted Prefect server instance** + +Some features in this tutorial, such as scheduling, require you to be connected to a Prefect server. If using a self-hosted setup, run `prefect server start` to run both the webserver and UI. 
If using Prefect Cloud, make sure you have [successfully authenticated your local environment](https://docs.prefect.io/cloud/cloud-quickstart/). + + +Why Deployments? +---------------------------------------------------------------- + +Some of the most common reasons to use an orchestration tool such as Prefect are for [scheduling](https://docs.prefect.io/concepts/schedules/) and [event-based triggering](https://docs.prefect.io/concepts/automations/). Up to this point, we’ve demonstrated running Prefect flows as scripts, but this means _you_ have been the one triggering and managing flow runs. You can certainly continue to trigger your workflows in this way and use Prefect as a monitoring layer for other schedulers or systems, but you will miss out on many of the other benefits and features that Prefect offers. + +Deploying a flow exposes an API and UI so that you can: + +* trigger new runs, [cancel active runs](https://docs.prefect.io/concepts/flows/#cancel-a-flow-run), pause scheduled runs, customize parameters, and more +* remotely configure schedules and automation rules for your deployments +* dynamically provision infrastructure using [workers](https://docs.prefect.io/tutorials/workers/) + +What is a deployment? +---------------------------------------------------------------- + +Deploying a flow is the act of specifying where and how it will run. This information is encapsulated and sent to Prefect as a [deployment](https://docs.prefect.io/concepts/deployments/) that contains the crucial metadata needed for remote orchestration. Deployments elevate workflows from functions that you call manually to API-managed entities. + +Attributes of a deployment include (but are not limited to): + +* **Flow entrypoint**: path to your flow function +* **Schedule** or **Trigger**: optional schedule or triggering rules for this deployment +* **Tags**: optional text labels for organizing your deployments + +Create a deployment +------------------------------------------------------------- + +Using our `get_repo_info` flow from the previous sections, we can easily create a deployment for it by calling a single method on the flow object: `flow.serve`. + + +```python repo_info.py +import httpx +from prefect import flow + + +@flow(log_prints=True) +def get_repo_info(repo_name: str = "PrefectHQ/prefect"): + url = f"https://api.github.com/repos/{repo_name}" + response = httpx.get(url) + response.raise_for_status() + repo = response.json() + print(f"{repo_name} repository statistics 🤓:") + print(f"Stars 🌠 : {repo['stargazers_count']}") + print(f"Forks 🍴 : {repo['forks_count']}") + + +if __name__ == "__main__": + get_repo_info.serve(name="my-first-deployment") + +``` + + +Running this script will do two things: + +* create a deployment called "my-first-deployment" for your flow in the Prefect API +* stay running to listen for flow runs for this deployment; when a run is found, it will be _asynchronously executed within a subprocess_ + + +**Deployments must be defined in static files** + +Flows can be defined and run interactively, that is, within REPLs or Notebooks. Deployments, on the other hand, require that your flow definition be in a known file (which can be located on a remote filesystem in certain setups, as we'll see in the next section of the tutorial). + + +Because this deployment has no schedule or triggering automation, you will need to use the UI or API to create runs for it. 
Let's use the CLI (in a separate terminal window) to create a run for this deployment: + +``` +prefect deployment run 'get-repo-info/my-first-deployment' + +``` + + +If you are watching either your terminal or your UI, you should see the newly created run execute successfully! +Let's take this example further by adding a schedule and additional metadata. + +### Additional options + +The `serve` method on flows exposes many options for the deployment. Let's use a few of these options now: + +* `cron`: a keyword that allows us to set a cron string schedule for the deployment; see [schedules](https://docs.prefect.io/concepts/schedules/) for more advanced scheduling options +* `tags`: a keyword that allows us to tag this deployment and its runs for bookkeeping and filtering purposes +* `description`: a keyword that allows us to document what this deployment does; by default the description is set from the docstring of the flow function, but we did not document our flow function +* `version`: a keyword that allows us to track changes to our deployment; by default a hash of the file containing the flow is used; popular options include semver tags or git commit hashes + +Let's add these options to our deployment: + +```python +if __name__ == "__main__": + get_repo_info.serve( + name="my-first-deployment", + cron="* * * * *", + tags=["testing", "tutorial"], + description="Given a GitHub repository, logs repository statistics for that repo.", + version="tutorial/deployments", + ) + +``` + + +When you rerun this script, you will find an updated deployment in the UI that is actively scheduling work! +Stop the script in the CLI using `CTRL+C` and your schedule will be automatically paused. + + +**`.serve` is a long-running process** + +For remotely triggered or scheduled runs to be executed, your script with `flow.serve` must be actively running. + + +Running multiple deployments at once +----------------------------------------------------------------------------------------------- + +This method is useful for creating deployments for single flows, but what if we have two or more flows? This situation only requires a few additional method calls and imports to get up and running: + + +```python multi_flow_deployment.py + +import time +from prefect import flow, serve + + +@flow +def slow_flow(sleep: int = 60): + "Sleepy flow - sleeps the provided amount of time (in seconds)." + time.sleep(sleep) + + +@flow +def fast_flow(): + "Fastest flow this side of the Mississippi." + return + + +if __name__ == "__main__": + slow_deploy = slow_flow.to_deployment(name="sleeper", interval=45) + fast_deploy = fast_flow.to_deployment(name="fast") + serve(slow_deploy, fast_deploy) + +``` + + +A few observations: + +* the `flow.to_deployment` interface exposes the _exact same_ options as `flow.serve`; this method produces a deployment object +* the deployments are only registered with the API once `serve(...)` is called +* when serving multiple deployments, the only requirement is that they share a Python environment; they can be executed and scheduled independently of each other + +Spend some time experimenting with this setup. 
A few potential next steps for exploration include:
+
+* pausing and unpausing the schedule for the "sleeper" deployment
+* using the UI to submit ad-hoc runs for the "sleeper" deployment with different values for `sleep`
+* cancelling an active run for the "sleeper" deployment from the UI (good luck cancelling the "fast" one 😉)
+
+
+**Hybrid execution option**
+
+Another implication of Prefect's deployment interface is that you can choose to use our hybrid execution model. Whether you use Prefect Cloud or host a Prefect server instance yourself, you can run workflows in the environments best suited to their execution. This model lets you make efficient use of your infrastructure resources while maintaining the privacy of your code and data. There is no ingress required. For more information, [read about our hybrid model](https://www.prefect.io/security/overview/#hybrid-model).
+
+
+Next steps
+-------------------------------------------
+
+Congratulations! You now have your first working deployment.
+
+Deploying flows through the `serve` method is a fast way to start scheduling flows with Prefect. However, if your team has more complex infrastructure requirements or you'd like to have Prefect manage flow execution, you can deploy flows to a work pool.
+
+Learn about work pools and how Prefect Cloud can handle infrastructure configuration for you in the [next step of the tutorial](https://docs.prefect.io/tutorial/work-pools/).
\ No newline at end of file
diff --git a/docs/2.19.x/tutorial/flows.mdx b/docs/2.19.x/tutorial/flows.mdx
new file mode 100644
index 000000000000..33c4e34365ca
--- /dev/null
+++ b/docs/2.19.x/tutorial/flows.mdx
@@ -0,0 +1,199 @@
+---
+title: Flows
+---
+
+**Prerequisites**
+
+This tutorial assumes you have already installed Prefect and connected to Prefect Cloud or a self-hosted server instance. See the prerequisites section of the [tutorial](https://docs.prefect.io/tutorial/) for more details.
+
+
+What is a flow?
+----------------------------------------------------
+
+[Flows](https://docs.prefect.io/concepts/flows/) are like functions. They can take inputs, perform work, and return an output. In fact, you can turn any function into a Prefect flow by adding the `@flow` decorator. When a function becomes a flow, its behavior changes, giving it the following advantages:
+
+* All runs of the flow have persistent [state](https://docs.prefect.io/concepts/states/). Transitions between states are recorded, allowing for flow execution to be observed and acted upon.
+* Input arguments can be type validated as workflow parameters.
+* Retries can be performed on failure.
+* Timeouts can be enforced to prevent unintentional, long-running workflows.
+* Metadata about [flow runs](#flow-runs), such as run time and final state, is automatically tracked.
+* They can easily be elevated to a [deployment](https://docs.prefect.io/concepts/deployments/), which exposes a remote API for interacting with it.
+
+Run your first flow
+-------------------------------------------------------------
+
+The simplest way to get started with Prefect is to annotate a Python function with the `@flow` decorator. The script below fetches statistics about the [main Prefect repository](https://github.com/PrefectHQ/prefect). Note that [httpx](https://www.python-httpx.org/) is an HTTP client library and a dependency of Prefect.
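+Here is the function as plain Python, before any Prefect code is added:
+
+```python repo_info.py
+import httpx
+
+
+def get_repo_info():
+    url = "https://api.github.com/repos/PrefectHQ/prefect"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print("PrefectHQ/prefect repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+if __name__ == "__main__":
+    get_repo_info()
+```
+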
Let's turn this function into a Prefect flow and run the script:
+
+
+```python repo_info.py
+import httpx
+from prefect import flow
+
+
+@flow
+def get_repo_info():
+    url = "https://api.github.com/repos/PrefectHQ/prefect"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print("PrefectHQ/prefect repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+if __name__ == "__main__":
+    get_repo_info()
+
+```
+
+
+Running this file will result in some interesting output:
+
+```python
+12:47:42.792 | INFO | prefect.engine - Created flow run 'ludicrous-warthog' for flow 'get-repo-info'
+PrefectHQ/prefect repository statistics 🤓:
+Stars 🌠 : 12146
+Forks 🍴 : 1245
+12:47:45.008 | INFO | Flow run 'ludicrous-warthog' - Finished in state Completed()
+
+```
+
+
+**Flows can contain arbitrary Python**
+
+As we can see above, flow definitions can contain arbitrary Python logic.
+
+
+Parameters
+-------------------------------------------
+
+As with any Python function, you can pass arguments to a flow. The positional and keyword arguments defined on your flow function are called [parameters](https://docs.prefect.io/concepts/flows/#parameters). Prefect will automatically perform type conversion using any provided type hints. Let's make the repository a string parameter with a default value:
+
+
+```python repo_info.py
+import httpx
+from prefect import flow
+
+
+@flow
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+
+if __name__ == "__main__":
+    get_repo_info(repo_name="PrefectHQ/marvin")
+
+```
+
+
+We can call our flow with varying values for the `repo_name` parameter (including "bad" values):
+
+```
+python repo_info.py
+```
+
+Try passing `repo_name="missing-org/missing-repo"`.
+
+You should see:
+
+```
+HTTPStatusError: Client error '404 Not Found' for url 'https://api.github.com/repos/missing-org/missing-repo'
+
+```
+
+
+Now navigate to your Prefect dashboard and compare the displays for these two runs.
+
+Logging
+-------------------------------------
+
+Prefect lets you log useful information about your flow and task runs so that you can monitor, troubleshoot, and audit your workflows. If we navigate to our dashboard and explore the runs we created above, we will notice that the repository statistics are not captured in the flow run logs.
+
+Let's fix that by adding some [logging](https://docs.prefect.io/concepts/logs) to our flow:
+
+
+```python repo_info.py
+import httpx
+from prefect import flow, get_run_logger
+
+
+@flow
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    logger = get_run_logger()
+    logger.info("%s repository statistics 🤓:", repo_name)
+    logger.info("Stars 🌠 : %d", repo["stargazers_count"])
+    logger.info("Forks 🍴 : %d", repo["forks_count"])
+
+```
+
+
+Now the output looks more consistent _and_, more importantly, our statistics are stored in the Prefect backend and displayed in the UI for this flow run:
+
+```python
+12:47:42.792 | INFO | prefect.engine - Created flow run 'ludicrous-warthog' for flow 'get-repo-info'
+12:47:43.016 | INFO | Flow run 'ludicrous-warthog' - PrefectHQ/prefect repository statistics 🤓:
+12:47:43.016 | INFO | Flow run 'ludicrous-warthog' - Stars 🌠 : 12146
+12:47:43.042 | INFO | Flow run 'ludicrous-warthog' - Forks 🍴 : 1245
+12:47:45.008 | INFO | Flow run 'ludicrous-warthog' - Finished in state Completed()
+
+```
+
+
+**`log_prints=True`**
+
+
+We could have achieved the exact same outcome by using Prefect's convenient `log_prints` keyword argument in the `flow` decorator:
+
+```python
+@flow(log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    ...
+
+```
+
+
+
+**Logging vs Artifacts**
+
+The example above is for educational purposes. In general, it is better to use [Prefect artifacts](https://docs.prefect.io/concepts/artifacts/) for storing metrics and output. Logs are best for tracking progress and debugging errors.
+
+
+Retries
+-------------------------------------
+
+So far our script works, but in the future unexpected errors may occur. For example, the GitHub API may be temporarily unavailable or rate limited. [Retries](https://docs.prefect.io/concepts/flows/#flow-settings) help make our flow more resilient. Let's add retry functionality to our example above:
+
+
+```python repo_info.py
+import httpx
+from prefect import flow
+
+
+@flow(retries=3, retry_delay_seconds=5, log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+if __name__ == "__main__":
+    get_repo_info()
+
+```
+
+
+[Next: Tasks](https://docs.prefect.io/tutorial/tasks/)
+---------------------------------------------------------------------------------------
+
+As you have seen, adding a flow decorator converts our Python function to a resilient and observable workflow. In the next section, you'll supercharge this flow by using tasks to break down the workflow's complexity and make it more performant and observable - [click here to continue](https://docs.prefect.io/tutorial/tasks/).
\ No newline at end of file
diff --git a/docs/2.19.x/tutorial/overview.mdx b/docs/2.19.x/tutorial/overview.mdx
new file mode 100644
index 000000000000..e47540d566ce
--- /dev/null
+++ b/docs/2.19.x/tutorial/overview.mdx
@@ -0,0 +1,53 @@
+---
+title: Tutorial Overview
+sidebarTitle: Overview
+---
+Prefect orchestrates workflows — it simplifies the creation, scheduling, and monitoring of complex data pipelines. You define workflows as Python code and Prefect handles the rest.
+
+Prefect also provides error handling, retry mechanisms, and a user-friendly dashboard for monitoring. It's the easiest way to transform any Python function into a unit of work that can be observed and orchestrated.
+
+This tutorial provides a guided walk-through of Prefect's core concepts and instructions on how to use them.
+
+You will:
+
+1. [Create a flow](https://docs.prefect.io/tutorial/flows/)
+2. [Add tasks to it](https://docs.prefect.io/tutorial/tasks/)
+3. [Deploy and run the flow locally](https://docs.prefect.io/tutorial/deployments/)
+4. [Create a work pool and run the flow on remote infrastructure](https://docs.prefect.io/tutorial/work-pools/)
+
+These four topics will get most users to their first production deployment.
+
+Advanced users who need more governance and control of their workflow infrastructure can go one step further by:
+
+5. [Using a worker-based deployment](https://docs.prefect.io/tutorial/workers/)
+
+If you're looking for examples of more advanced operations (such as [deploying on Kubernetes](https://docs.prefect.io/guides/deployment/kubernetes/)), check out Prefect's [guides](https://docs.prefect.io/guides/).
+
+Compared to the [Quickstart](https://docs.prefect.io/getting-started/quickstart/), this tutorial is a more in-depth guide to Prefect's functionality. You will also see how to customize the Docker image where your flow runs and learn how to run flows on your own infrastructure.
+
+Prerequisites
+-------------------------------------------------
+
+Before you start, make sure you have Python installed in a virtual environment. Then install Prefect:
+
+```
+pip install -U prefect
+```
+
+See the [install guide](https://docs.prefect.io/getting-started/installation/) for more detailed instructions.
+
+To get the most out of Prefect, you need to connect to a forever-free [Prefect Cloud](https://app.prefect.cloud/) account.
+
+ 1. Create a new account or sign in at [https://app.prefect.cloud/](https://app.prefect.cloud/).
+ 2. Use the `prefect cloud login` CLI command to [authenticate to Prefect Cloud](https://docs.prefect.io/cloud/users/api-keys/) from your environment.
+
+Choose **Log in with a web browser** and click the **Authorize** button in the browser window that opens.
+
+If you have any issues with browser-based authentication, see the [Prefect Cloud docs](https://docs.prefect.io/cloud/users/api-keys/) to learn how to authenticate with a manually created API key.
+
+As an alternative to using Prefect Cloud, you can self-host a [Prefect server instance](https://docs.prefect.io/host/). If you choose this option, run `prefect server start` to start a local Prefect server instance.
+
+[First steps: Flows](https://docs.prefect.io/tutorial/flows/)
+-----------------------------------------------------------------------------------------------------
+
+Let's begin by learning how to create your first Prefect flow - [click here to get started](https://docs.prefect.io/tutorial/flows/).
\ No newline at end of file
diff --git a/docs/2.19.x/tutorial/tasks.mdx b/docs/2.19.x/tutorial/tasks.mdx
new file mode 100644
index 000000000000..8ebdadcd8eb6
--- /dev/null
+++ b/docs/2.19.x/tutorial/tasks.mdx
@@ -0,0 +1,245 @@
+---
+title: Tasks
+---
+
+What is a task?
+----------------------------------------------------
+
+A [task](https://docs.prefect.io/concepts/tasks/) is any Python function decorated with the `@task` decorator. You can think of a flow as a recipe for connecting a known sequence of tasks together.
Tasks, and the dependencies between them, are displayed in the flow run graph, enabling you to break down a complex flow into something you can observe, understand, and control at a more granular level.
+When a function becomes a task, it can be executed concurrently and its return value can be cached.
+
+Flows and tasks share some common features:
+
+* Both are defined easily using their respective decorator, which accepts settings for that flow / task (see all [task settings](https://docs.prefect.io/concepts/tasks/#task-arguments) / [flow settings](https://docs.prefect.io/concepts/flows/#flow-settings)).
+* Each can be given a `name`, `description` and `tags` for organization and bookkeeping.
+* Both provide functionality for retries, timeouts, and other hooks to handle failure and completion events.
+
+Network calls (such as our `GET` requests to the GitHub API) are particularly useful as tasks because they take advantage of task features such as [retries](https://docs.prefect.io/concepts/tasks/#retries), [caching](https://docs.prefect.io/concepts/tasks/#caching), and [concurrency](https://docs.prefect.io/concepts/task-runners/#using-a-task-runner).
+
+**Tasks may be called from other tasks**
+
+As of `prefect 2.18.x`, tasks can be called from within other tasks. This removes the need to use subflows for simple task composition.
+
+
+
+**When to use tasks**
+
+Not all functions in a flow need to be tasks. Use them only when their features are useful.
+
+
+Let's take our flow from before and move the request into a task:
+
+```python repo_info.py
+import httpx
+from prefect import flow, task
+from typing import Any, Optional
+
+
+@task
+def get_url(url: str, params: Optional[dict[str, Any]] = None):
+    response = httpx.get(url, params=params)
+    response.raise_for_status()
+    return response.json()
+
+
+@flow(retries=3, retry_delay_seconds=5, log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    repo_stats = get_url(url)
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo_stats['stargazers_count']}")
+    print(f"Forks 🍴 : {repo_stats['forks_count']}")
+
+if __name__ == "__main__":
+    get_repo_info()
+
+```
+
+
+Running the flow in your terminal will result in something like this:
+
+```python
+09:55:55.412 | INFO | prefect.engine - Created flow run 'great-ammonite' for flow 'get-repo-info'
+09:55:55.499 | INFO | Flow run 'great-ammonite' - Created task run 'get_url-0' for task 'get_url'
+09:55:55.500 | INFO | Flow run 'great-ammonite' - Executing 'get_url-0' immediately...
+09:55:55.825 | INFO | Task run 'get_url-0' - Finished in state Completed()
+09:55:55.827 | INFO | Flow run 'great-ammonite' - PrefectHQ/prefect repository statistics 🤓:
+09:55:55.827 | INFO | Flow run 'great-ammonite' - Stars 🌠 : 12157
+09:55:55.827 | INFO | Flow run 'great-ammonite' - Forks 🍴 : 1251
+09:55:55.849 | INFO | Flow run 'great-ammonite' - Finished in state Completed('All states completed.')
+
+```
+
+
+And you should now see this task run tracked in the UI as well.
+
+Caching
+-------------------------------------
+
+Tasks support the ability to cache their return value. Caching allows you to efficiently reuse [results](https://docs.prefect.io/concepts/results/) of tasks that may be expensive to reproduce with every flow run, or reuse cached results if the inputs to a task have not changed.
+
+To enable caching, specify a `cache_key_fn` — a function that returns a cache key — on your task.
You may optionally provide a `cache_expiration` timedelta indicating when the cache expires. You can define a task that is cached based on its inputs by using the Prefect [`task_input_hash`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.task_input_hash). Let's add caching to our `get_url` task:
+
+```python
+import httpx
+from datetime import timedelta
+from prefect import flow, task
+from prefect.tasks import task_input_hash
+from typing import Any, Optional
+
+
+@task(
+    cache_key_fn=task_input_hash,
+    cache_expiration=timedelta(hours=1),
+)
+def get_url(url: str, params: Optional[dict[str, Any]] = None):
+    response = httpx.get(url, params=params)
+    response.raise_for_status()
+    return response.json()
+
+```
+
+
+You can test this caching behavior by using a personal repository as your workflow parameter: give it a star (or remove one) and see whether the output of this task changes, or doesn't, when you run your flow multiple times.
+
+**Task results and caching**
+
+Task results are cached in memory during a flow run and persisted to your home directory by default. Prefect Cloud only stores the cache key, not the data itself.
+
+
+Concurrency
+---------------------------------------------
+
+Tasks enable concurrency, allowing you to execute multiple tasks asynchronously. This concurrency can greatly enhance the efficiency and performance of your workflows. Let's expand our script to calculate the average open issues per user. This will require making more requests:
+
+
+```python repo_info.py
+import httpx
+from datetime import timedelta
+from prefect import flow, task
+from prefect.tasks import task_input_hash
+from typing import Any, Optional
+
+
+@task(cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1))
+def get_url(url: str, params: Optional[dict[str, Any]] = None):
+    response = httpx.get(url, params=params)
+    response.raise_for_status()
+    return response.json()
+
+
+def get_open_issues(repo_name: str, open_issues_count: int, per_page: int = 100):
+    issues = []
+    # Ceiling division: enough pages to cover all open issues
+    pages = range(1, -(open_issues_count // -per_page) + 1)
+    for page in pages:
+        issues.append(
+            get_url(
+                f"https://api.github.com/repos/{repo_name}/issues",
+                params={"page": page, "per_page": per_page, "state": "open"},
+            )
+        )
+    return [i for p in issues for i in p]
+
+
+@flow(retries=3, retry_delay_seconds=5, log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    repo_stats = get_url(f"https://api.github.com/repos/{repo_name}")
+    issues = get_open_issues(repo_name, repo_stats["open_issues_count"])
+    issues_per_user = len(issues) / len(set([i["user"]["id"] for i in issues]))
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo_stats['stargazers_count']}")
+    print(f"Forks 🍴 : {repo_stats['forks_count']}")
+    print(f"Average open issues per user 💌 : {issues_per_user:.2f}")
+
+
+if __name__ == "__main__":
+    get_repo_info()
+
+```
+
+
+Now we're fetching the data we need, but the requests are happening sequentially. Tasks expose a [`submit`](https://docs.prefect.io/api-ref/prefect/tasks/#prefect.tasks.Task.submit) method that changes the execution from sequential to concurrent.
In our specific example, we also need to use the [`result`](https://docs.prefect.io/api-ref/prefect/futures/#prefect.futures.PrefectFuture.result) method because we are unpacking a list of return values: + +```python +def get_open_issues(repo_name: str, open_issues_count: int, per_page: int = 100): + issues = [] + pages = range(1, -(open_issues_count // -per_page) + 1) + for page in pages: + issues.append( + get_url.submit( + f"https://api.github.com/repos/{repo_name}/issues", + params={"page": page, "per_page": per_page, "state": "open"}, + ) + ) + return [i for p in issues for i in p.result()] + +``` + + +The logs show that each task is running concurrently: + +``` +12:45:28.241 | INFO | prefect.engine - Created flow run 'intrepid-coua' for flow 'get-repo-info' +12:45:28.311 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-0' for task 'get_url' +12:45:28.312 | INFO | Flow run 'intrepid-coua' - Executing 'get_url-0' immediately... +12:45:28.543 | INFO | Task run 'get_url-0' - Finished in state Completed() +12:45:28.583 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-1' for task 'get_url' +12:45:28.584 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-1' for execution. +12:45:28.594 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-2' for task 'get_url' +12:45:28.594 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-2' for execution. +12:45:28.609 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-4' for task 'get_url' +12:45:28.610 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-4' for execution. +12:45:28.624 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-5' for task 'get_url' +12:45:28.625 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-5' for execution. +12:45:28.640 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-6' for task 'get_url' +12:45:28.641 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-6' for execution. +12:45:28.708 | INFO | Flow run 'intrepid-coua' - Created task run 'get_url-3' for task 'get_url' +12:45:28.708 | INFO | Flow run 'intrepid-coua' - Submitted task run 'get_url-3' for execution. +12:45:29.096 | INFO | Task run 'get_url-6' - Finished in state Completed() +12:45:29.565 | INFO | Task run 'get_url-2' - Finished in state Completed() +12:45:29.721 | INFO | Task run 'get_url-5' - Finished in state Completed() +12:45:29.749 | INFO | Task run 'get_url-4' - Finished in state Completed() +12:45:29.801 | INFO | Task run 'get_url-3' - Finished in state Completed() +12:45:29.817 | INFO | Task run 'get_url-1' - Finished in state Completed() +12:45:29.820 | INFO | Flow run 'intrepid-coua' - PrefectHQ/prefect repository statistics 🤓: +12:45:29.820 | INFO | Flow run 'intrepid-coua' - Stars 🌠 : 12159 +12:45:29.821 | INFO | Flow run 'intrepid-coua' - Forks 🍴 : 1251 +Average open issues per user 💌 : 2.27 +12:45:29.838 | INFO | Flow run 'intrepid-coua' - Finished in state Completed('All states completed.') + +``` + + +Subflows +--------------------------------------- + +Not only can you call tasks within a flow, but you can also call other flows! Child flows are called [subflows](https://docs.prefect.io/concepts/flows/#composing-flows) and allow you to efficiently manage, track, and version common multi-task logic. + +Subflows are a great way to organize your workflows and offer more visibility within the UI. 
+
+Let's add a `flow` decorator to our `get_open_issues` function:
+
+```python
+@flow
+def get_open_issues(repo_name: str, open_issues_count: int, per_page: int = 100):
+    issues = []
+    pages = range(1, -(open_issues_count // -per_page) + 1)
+    for page in pages:
+        issues.append(
+            get_url.submit(
+                f"https://api.github.com/repos/{repo_name}/issues",
+                params={"page": page, "per_page": per_page, "state": "open"},
+            )
+        )
+    return [i for p in issues for i in p.result()]
+
+```
+
+
+Whenever we run the parent flow, a new run will be generated for the `get_open_issues` function as well. Not only is this run tracked as a subflow run of the main flow, but you can also inspect it independently in the UI!
+
+[Next: Deployments](https://docs.prefect.io/tutorial/deployments/)
+---------------------------------------------------------------------------------------------------------
+
+We now have a flow with tasks, subflows, retries, logging, caching, and concurrent execution. In the next section, we'll see how we can deploy this flow in order to run it on a schedule and/or external infrastructure - [click here to learn how to create your first deployment](https://docs.prefect.io/tutorial/deployments/).
\ No newline at end of file
diff --git a/docs/2.19.x/tutorial/work-pools.mdx b/docs/2.19.x/tutorial/work-pools.mdx
new file mode 100644
index 000000000000..ea56afae420a
--- /dev/null
+++ b/docs/2.19.x/tutorial/work-pools.mdx
@@ -0,0 +1,302 @@
+---
+title: Work Pools
+---
+
+Why work pools?
+----------------------------------------------------
+
+Work pools are a bridge between the Prefect orchestration layer and infrastructure for flow runs that can be dynamically provisioned. To transition from persistent infrastructure to dynamic infrastructure, use `flow.deploy` instead of `flow.serve`.
+
+## [Choosing Between `flow.deploy()` and `flow.serve()`](https://docs.prefect.io/concepts/deployments/#two-approaches-to-deployments)
+
+Earlier in the tutorial you used `serve` to deploy your flows. For many use cases, `serve` is sufficient to meet scheduling and orchestration needs. Work pools are **optional**. If infrastructure needs escalate, work pools can become a handy tool. The best part? You're not locked into one method. You can seamlessly combine approaches as needed.
+
+
+
+**Deployment definition methods differ slightly for work pools**
+
+When you use work-pool-based execution, you define deployments differently. Deployments for workers are configured with `deploy`, which requires additional configuration. A deployment created with `serve` cannot be used with a work pool.
+
+The primary reason to use work pools is for **dynamic infrastructure provisioning and configuration**. For example, you might have a workflow that has expensive infrastructure requirements and is run infrequently. In this case, you don't want an idle process running within that infrastructure.
+
+Other advantages to using work pools include:
+
+* You can configure default infrastructure configurations on your work pools that all jobs inherit and can override.
+* Platform teams can use work pools to expose opinionated (and enforced!) interfaces to the infrastructure that they oversee.
+* Work pools can be used to prioritize (or limit) flow runs through the use of [work queues](https://docs.prefect.io/concepts/work-pools/#work-queues).
+
+Prefect provides several [types of work pools](https://docs.prefect.io/concepts/work-pools/#work-pool-types).
Prefect Cloud provides a Prefect Managed work pool option that is the simplest way to run workflows remotely. A cloud-provider account, such as AWS, is not required with a Prefect Managed work pool.
+
+Set up a work pool
+-----------------------------------------------------------
+
+**Prefect Cloud**
+
+This tutorial uses Prefect Cloud to deploy flows to work pools. Managed execution and push work pools are available in [Prefect Cloud](https://www.prefect.io/cloud) only. If you are not using Prefect Cloud, please learn about work pools below and then proceed to the [next tutorial](https://docs.prefect.io/tutorial/workers/) that uses worker-based work pools.
+
+
+### Create a Prefect Managed work pool
+
+In your terminal, run the following command to set up a work pool named `my-managed-pool` of type `prefect:managed`.
+
+```
+prefect work-pool create my-managed-pool --type prefect:managed
+
+```
+
+
+Let’s confirm that the work pool was successfully created by running the following command.
+
+```
+prefect work-pool ls
+```
+
+You should see your new `my-managed-pool` in the output list.
+
+Finally, let’s double check that you can see this work pool in the UI.
+
+Navigate to the **Work Pools** tab and verify that you see `my-managed-pool` listed.
+
+Feel free to select **Edit** from the three-dot menu on the right of the work pool card to view the details of your work pool.
+
+Work pools contain configuration that is used to provision infrastructure for flow runs. For example, you can specify additional Python packages or environment variables that should be set for all deployments that use this work pool. Note that individual deployments can override the work pool configuration.
+
+Now that you’ve set up your work pool, we can deploy a flow to this work pool. Let's deploy your tutorial flow to `my-managed-pool`.
+
+Create the deployment
+-----------------------------------------------------------------
+
+From our previous steps, we now have:
+
+1. [A flow](https://docs.prefect.io/tutorial/flows/)
+2. A work pool
+
+Let's update our `repo_info.py` file to create a deployment in Prefect Cloud.
+
+The updates that we need to make to `repo_info.py` are:
+
+1. Change `flow.serve` to `flow.deploy`.
+2. Tell `flow.deploy` which work pool to deploy to.
+
+Here's what the updated `repo_info.py` looks like:
+
+
+```python repo_info.py
+import httpx
+from prefect import flow
+
+
+@flow(log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+
+if __name__ == "__main__":
+    get_repo_info.from_source(
+        source="https://github.com/prefecthq/demos.git",
+        entrypoint="repo_info.py:get_repo_info"
+    ).deploy(
+        name="my-first-deployment",
+        work_pool_name="my-managed-pool",
+    )
+
+```
+
+
+In the `from_source` method, we specify the source of our flow code.
+
+In the `deploy` method, we specify the name of our deployment and the name of the work pool that we created earlier.
+
+You can store your flow code in any of several types of remote storage. In this example, we use a GitHub repository, but you could use a Docker image, as you'll see in an upcoming section of the tutorial.
Alternatively, you could store your flow code in cloud provider storage such as AWS S3, or within a different git-based cloud provider such as GitLab or Bitbucket.
+
+**Note**
+
+In the example above, we store our code in a GitHub repository. If you make changes to the flow code, you will need to push those changes to your own GitHub account and update the `source` argument of `from_source` to point to your repository.
+
+
+Now that you've updated your script, you can run it to register your deployment on Prefect Cloud:
+```
+python repo_info.py
+```
+
+You should see a message in the CLI that your deployment was created with instructions for how to run it.
+
+```
+Successfully created/updated all deployments!
+
+                            Deployments
+┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━┓
+┃ Name                              ┃ Status  ┃ Details ┃
+┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━┩
+│ get-repo-info/my-first-deployment │ applied │         │
+└───────────────────────────────────┴─────────┴─────────┘
+
+To schedule a run for this deployment, use the following command:
+
+        $ prefect deployment run 'get-repo-info/my-first-deployment'
+
+
+You can also run your flow via the Prefect UI: https://app.prefect.cloud/account/
+abc/workspace/123/deployments/deployment/xyz
+
+```
+
+
+Navigate to your Prefect Cloud UI and view your new deployment. Click the **Run** button to trigger a run of your deployment.
+
+Because this deployment was configured with a Prefect Managed work pool, Prefect Cloud will run your flow on your behalf.
+
+View the logs in the UI.
+
+### Schedule a deployment run
+
+Now everything is set up for us to submit a flow run to the work pool. Go ahead and run the deployment from the CLI or the UI.
+
+```
+prefect deployment run 'get-repo-info/my-first-deployment'
+
+```
+
+
+Prefect Managed work pools are a great way to get started with Prefect.
+See the [Managed Execution guide](https://docs.prefect.io/guides/managed-execution/) for more details.
+
+Many users will find that they need more control over the infrastructure that their flows run on. Prefect Cloud's push work pools are a popular option in those cases.
+
+
+Push work pools with automatic infrastructure provisioning
+-------------------------------------------------------------------------------------------------------------------------------------------
+
+Serverless push work pools scale infinitely and provide more configuration options than Prefect Managed work pools.
+
+Prefect provides push work pools for AWS ECS on Fargate, Azure Container Instances, Google Cloud Run, and Modal. To use a push work pool, you will need an account with sufficient permissions on the cloud provider that you want to use. We'll use GCP for this example.
+
+Setting up the cloud provider pieces for infrastructure can be tricky and time-consuming. Fortunately, Prefect can automatically provision infrastructure for you and wire it all together to work with your push work pool.
+
+### Create a push work pool with automatic infrastructure provisioning
+
+Before creating a **push work pool**, install the [gcloud CLI](https://cloud.google.com/sdk/docs/install) and [authenticate with your GCP project](https://cloud.google.com/docs/authentication/gcloud).
+
+If you already have the gcloud CLI installed, be sure to update to the latest version with `gcloud components update`.
+ +You will need the following permissions in your GCP project: + +* resourcemanager.projects.list +* serviceusage.services.enable +* iam.serviceAccounts.create +* iam.serviceAccountKeys.create +* resourcemanager.projects.setIamPolicy +* artifactregistry.repositories.create + +Docker is also required to build and push images to your registry. You can install Docker [here](https://docs.docker.com/get-docker/). + +Run the following command to set up a work pool named `my-cloud-run-pool` of type `cloud-run:push`. + +``` +prefect work-pool create --type cloud-run:push --provision-infra my-cloud-run-pool + +``` + + +Using the `--provision-infra` flag allows you to select a GCP project to use for your work pool and automatically configure it to be ready to execute flows via Cloud Run. In your GCP project, this command will activate the Cloud Run API, create a service account, and create a key for the service account, if they don't already exist. In your Prefect workspace, this command will create a [`GCPCredentials` block](https://prefecthq.github.io/prefect-gcp/credentials/) for storing the service account key. + +Here's an abbreviated example output from running the command: + +``` +╭──────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ Provisioning infrastructure for your work pool my-cloud-run-pool will require: │ +│ │ +│ Updates in GCP project central-kit-405415 in region us-central1 │ +│ │ +│ - Activate the Cloud Run API for your project │ +│ - Activate the Artifact Registry API for your project │ +│ - Create an Artifact Registry repository named prefect-images │ +│ - Create a service account for managing Cloud Run jobs: prefect-cloud-run │ +│ - Service account will be granted the following roles: │ +│ - Service Account User │ +│ - Cloud Run Developer │ +│ - Create a key for service account prefect-cloud-run │ +│ │ +│ Updates in Prefect workspace │ +│ │ +│ - Create GCP credentials block my--pool-push-pool-credentials to store the service account key │ +│ │ +╰──────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +Proceed with infrastructure provisioning? [y/n]: y +Activating Cloud Run API +Activating Artifact Registry API +Creating Artifact Registry repository +Configuring authentication to Artifact Registry +Setting default Docker build namespace +Creating service account +Assigning roles to service account +Creating service account key +Creating GCP credentials block +Provisioning Infrastructure ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 100% 0:00:00 +Infrastructure successfully provisioned! +Created work pool 'my-cloud-run-pool'! + +``` + + +After infrastructure provisioning completes, you will be logged into your new Artifact Registry repository and the default Docker build namespace will be set to the URL of the repository. + +While the default namespace is set, any images you build without specifying a registry or username/organization will be pushed to the repository. + +To take advantage of this functionality, you can write your deploy script like this: + + +```python example_deploy_script.py + +from prefect import flow +from prefect.deployments import DeploymentImage + + +@flow(log_prints=True) +def my_flow(name: str = "world"): + print(f"Hello {name}! 
I'm a flow running on Cloud Run!")
+
+
+if __name__ == "__main__":
+    my_flow.deploy(
+        name="my-deployment",
+        work_pool_name="above-ground",
+        cron="0 1 * * *",
+        image=DeploymentImage(
+            name="my-image:latest",
+            platform="linux/amd64",
+        )
+    )
+
+```
+
+
+Run the script to register the deployment with Prefect Cloud.
+
+Running this script will build a Docker image with the tag `<region>-docker.pkg.dev/<project>/<repository>/my-image:latest` and push it to your repository.
+
+**Tip**
+
+Make sure you have Docker running locally before running this script.
+
+
+Note that you only need to include an object of the `DeploymentImage` class with the argument `platform="linux/amd64"` if you're building your image on a machine with an ARM-based processor. Otherwise, you could just pass `image="my-image:latest"` to `deploy`.
+
+Also note that the `cron` argument will schedule the deployment to run at 1am every day. See the [schedules](https://docs.prefect.io/concepts/schedules/) docs for more information on scheduling options.
+
+See the [Push Work Pool guide](https://docs.prefect.io/guides/deployment/push-work-pools/) for more details and example commands for each cloud provider.
+
+Next step
+-----------------------------------------
+
+Congratulations! You've learned how to deploy flows to work pools. If these work pool options meet all of your needs, we encourage you to go deeper with the [concepts docs](https://docs.prefect.io/concepts/) or explore our [how-to guides](https://docs.prefect.io/guides/) to see examples of particular Prefect use cases.
+
+However, if you need more control over your infrastructure, want to run your workflows in Kubernetes, or are running a self-hosted Prefect server instance, we encourage you to see the [next section of the tutorial](https://docs.prefect.io/tutorial/workers/). There you'll learn how to use work pools that rely on a worker and see how to customize Docker images for container-based infrastructure.
\ No newline at end of file
diff --git a/docs/2.19.x/tutorial/workers.mdx b/docs/2.19.x/tutorial/workers.mdx
new file mode 100644
index 000000000000..862a00bc876b
--- /dev/null
+++ b/docs/2.19.x/tutorial/workers.mdx
@@ -0,0 +1,265 @@
+---
+title: Workers
+---
+
+Prerequisites
+-------------------------------------------------
+
+[Docker](https://docs.docker.com/engine/install/) installed and running on your machine.
+
+Why workers
+---------------------------------------------
+
+In the previous section of the tutorial, you learned how work pools are a bridge between the Prefect orchestration layer and infrastructure for flow runs that can be dynamically provisioned. You saw how you can transition from persistent infrastructure to dynamic infrastructure by using `flow.deploy` instead of `flow.serve`.
+
+Work pools that rely on client-side workers take this a step further by enabling you to run workflows in your own Docker containers, Kubernetes clusters, and serverless environments such as AWS ECS, Azure Container Instances, and GCP Cloud Run.
+
+The architecture of a worker-based work pool deployment can be summarized with the following diagram:
+
+![](/images/worker1.png)
+
+Notice above that the worker is in charge of provisioning the _flow run infrastructure_. In the context of this tutorial, that flow run infrastructure is an ephemeral Docker container to host each flow run. Different [worker types](https://docs.prefect.io/concepts/work-pools/#worker-types) create different types of flow run infrastructure.
+
+Now that we’ve reviewed the concepts of a work pool and worker, let’s create them so that you can deploy your tutorial flow and execute it later using the Prefect API.
+
+Set up a work pool and worker
+---------------------------------------------------------------------------------
+
+For this tutorial you will create a **Docker** type work pool via the CLI.
+
+Using the **Docker** work pool type means that all work sent to this work pool will run within a dedicated Docker container using a Docker client available to the worker.
+
+**Other work pool types**
+
+There are [work pool types](https://docs.prefect.io/concepts/work-pools/#worker-types) for serverless computing environments such as AWS ECS, Azure Container Instances, Google Cloud Run, and Vertex AI. Kubernetes is also a popular work pool type.
+
+These options are expanded upon in various [How-to Guides](https://docs.prefect.io/guides/).
+
+
+
+
+### Create a work pool
+
+In your terminal, run the following command to set up a **Docker** type work pool.
+
+```
+prefect work-pool create --type docker my-docker-pool
+
+```
+
+
+Let’s confirm that the work pool was successfully created by running the following command in the same terminal.
+
+```
+prefect work-pool ls
+```
+
+You should see your new `my-docker-pool` listed in the output.
+
+Finally, let’s double check that you can see this work pool in your Prefect UI.
+
+Navigate to the **Work Pools** tab and verify that you see `my-docker-pool` listed.
+
+When you click into `my-docker-pool` you should see a red status icon signifying that this work pool is not ready.
+
+To make the work pool ready, you need to start a worker.
+
+### Start a worker
+
+Workers are lightweight polling processes that kick off scheduled flow runs on a specific type of infrastructure (such as Docker). To start a worker on your local machine, open a new terminal and confirm that your virtual environment has `prefect` installed.
+
+Run the following command in this new terminal to start the worker:
+
+```
+prefect worker start --pool my-docker-pool
+
+```
+
+
+You should see the worker start. It's now polling the Prefect API to check for any scheduled flow runs it should pick up and then submit for execution. You’ll see your new worker listed in the UI under the **Workers** tab of the Work Pools page with a recent last polled date.
+
+You should also be able to see a `Ready` status indicator on your work pool - progress!
+
+You will need to keep this terminal session active for the worker to continue to pick up jobs. Since you are running this worker locally, the worker will terminate if you close the terminal. Therefore, in a production setting this worker should run as a [daemonized or managed process](https://docs.prefect.io/guides/deployment/daemonize/).
+
+Now that you’ve set up your work pool and worker, we have what we need to kick off and execute flow runs of flows deployed to this work pool. Let's deploy your tutorial flow to `my-docker-pool`.
+
+Create the deployment
+-----------------------------------------------------------------
+
+From our previous steps, we now have:
+
+1. [A flow](https://docs.prefect.io/tutorial/flows/)
+2. A work pool
+3. A worker
+
+Now it’s time to put it all together. We're going to update our `repo_info.py` file to build a Docker image and update our deployment so our worker can execute it.
+
+The updates that you need to make to `repo_info.py` are:
+
+1. Change `flow.serve` to `flow.deploy`.
+2. Tell `flow.deploy` which work pool to deploy to.
+3.
Tell `flow.deploy` the name to use for the Docker image that will be built.
+
+Here's what the updated `repo_info.py` looks like:
+
+
+```python repo_info.py
+
+import httpx
+from prefect import flow
+
+
+@flow(log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+
+if __name__ == "__main__":
+    get_repo_info.deploy(
+        name="my-first-deployment",
+        work_pool_name="my-docker-pool",
+        image="my-first-deployment-image:tutorial",
+        push=False
+    )
+
+```
+
+
+### Why the `push=False`?
+
+For this tutorial, your Docker worker is running on your machine, so we don't need to push the image built by `flow.deploy` to a registry. When your worker is running on a remote machine, you will need to push the image to a registry that the worker can access.
+
+Remove the `push=False` argument, include your registry name, and ensure you've [authenticated with the Docker CLI](https://docs.docker.com/engine/reference/commandline/login/) to push the image to a registry.
+
+
+Now that you've updated your script, you can run it to deploy your flow to the work pool:
+
+```
+python repo_info.py
+```
+
+Prefect will build a custom Docker image containing your workflow code that the worker can use to dynamically spawn Docker containers whenever this workflow needs to run.
+
+
+**What Dockerfile?**
+
+In this example, Prefect generates a Dockerfile for you that will build an image based on one of Prefect's published images. The generated Dockerfile will copy the current directory into the Docker image and install any dependencies listed in a `requirements.txt` file.
+
+If you want to use a custom Dockerfile, you can specify the path to the Dockerfile using the `DeploymentImage` class:
+
+```python repo_info.py
+import httpx
+from prefect import flow
+from prefect.deployments import DeploymentImage
+
+
+@flow(log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+
+if __name__ == "__main__":
+    get_repo_info.deploy(
+        name="my-first-deployment",
+        work_pool_name="my-docker-pool",
+        image=DeploymentImage(
+            name="my-first-deployment-image",
+            tag="tutorial",
+            dockerfile="Dockerfile"
+        ),
+        push=False
+    )
+
+```
+
+
+### Modify the deployment
+
+If you need to make updates to your deployment, you can do so by modifying your script and rerunning it. You'll need to make one update to specify a value for `job_variables` to ensure your Docker worker can successfully execute scheduled runs for this flow. See the example below.
+
+The `job_variables` section allows you to fine-tune the infrastructure settings for a specific deployment. These values override default values in the specified work pool's [base job template](https://docs.prefect.io/concepts/work-pools/#base-job-template).
+
+When testing images locally without pushing them to a registry (to avoid potential errors like `docker.errors.NotFound`), it's recommended to set the `image_pull_policy` job variable to `Never`.
However, for production workflows, always consider pushing images to a remote registry for more reliability and accessibility.
+
+Here's how you can quickly set the `image_pull_policy` to be `Never` for this tutorial deployment without affecting the default value set on your work pool:
+
+
+```python repo_info.py
+
+import httpx
+from prefect import flow
+
+
+@flow(log_prints=True)
+def get_repo_info(repo_name: str = "PrefectHQ/prefect"):
+    url = f"https://api.github.com/repos/{repo_name}"
+    response = httpx.get(url)
+    response.raise_for_status()
+    repo = response.json()
+    print(f"{repo_name} repository statistics 🤓:")
+    print(f"Stars 🌠 : {repo['stargazers_count']}")
+    print(f"Forks 🍴 : {repo['forks_count']}")
+
+
+if __name__ == "__main__":
+    get_repo_info.deploy(
+        name="my-first-deployment",
+        work_pool_name="my-docker-pool",
+        job_variables={"image_pull_policy": "Never"},
+        image="my-first-deployment-image:tutorial",
+        push=False
+    )
+
+```
+
+
+To register this update to your deployment with Prefect's API, run:
+
+```
+python repo_info.py
+```
+
+Now everything is set for us to submit a flow run to the work pool:
+
+```
+prefect deployment run 'get-repo-info/my-first-deployment'
+
+```
+
+
+**Common Pitfall**
+
+* Store and run your deploy scripts at the **root of your repo**, otherwise the built Docker image may be missing files that it needs to execute!
+
+
+
+**Did you know?**
+
+A Prefect flow can have more than one deployment. This pattern can be useful if you want your flow to run in different execution environments.
+
+
+Next steps
+-------------------------------------------
+
+* Go deeper with deployments and learn about configuring deployments in YAML with [`prefect.yaml`](https://docs.prefect.io/guides/prefect-deploy/).
+* [Concepts](https://docs.prefect.io/concepts/) contain deep dives into Prefect components.
+* [Guides](https://docs.prefect.io/guides/) provide step-by-step recipes for common Prefect operations including:
+  * [Deploying flows on Kubernetes](https://docs.prefect.io/guides/deployment/kubernetes/)
+  * [Deploying flows in Docker](https://docs.prefect.io/guides/deployment/docker/)
+  * [Deploying flows on serverless infrastructure](https://docs.prefect.io/guides/deployment/serverless-workers/)
+  * [Daemonizing workers](https://docs.prefect.io/guides/deployment/daemonize/)
+
+Happy building!
\ No newline at end of file
diff --git a/docs/3.0rc/api-ref/index.mdx b/docs/3.0rc/api-ref/index.mdx
new file mode 100644
index 000000000000..3589409ebc48
--- /dev/null
+++ b/docs/3.0rc/api-ref/index.mdx
@@ -0,0 +1,14 @@
+---
+title: API Reference
+description: Explore Prefect's auto-generated reference documentation.
+---
+
+Prefect auto-generates reference documentation for the following components:
+
+- **[Prefect Python SDK](https://prefect-python-sdk-docs.netlify.app/)**: used to build, test, and execute workflows.
+- **Prefect REST API**: used by workflow clients and the Prefect UI for orchestration and data retrieval.
+  - Prefect Cloud REST API documentation is available at https://app.prefect.cloud/api/docs.
+  - The REST API documentation for a locally hosted open-source Prefect server is available in the [Prefect REST API Reference](3.0rc/api-ref/server).
+
+
+When self-hosting, you can access REST API documentation at the `/docs` endpoint of your [`PREFECT_API_URL`](/3.0rc/concepts/settings/#prefect_api_url) - for example, if you ran `prefect server start` with no additional configuration you can find this reference at http://localhost:4200/docs.
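+As a quick check that a self-hosted server's API (and therefore its `/docs` reference) is reachable, you can query the server's health endpoint; a minimal sketch, assuming the default local configuration:
+
+```python
+import httpx
+
+# Assumes a local server started with `prefect server start`
+API_URL = "http://localhost:4200/api"
+
+response = httpx.get(f"{API_URL}/health")
+response.raise_for_status()
+print("API reachable:", response.json())
+```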
diff --git a/docs/3.0rc/api-ref/python/index.mdx b/docs/3.0rc/api-ref/python/index.mdx
new file mode 100644
index 000000000000..eb1d2ec5c866
--- /dev/null
+++ b/docs/3.0rc/api-ref/python/index.mdx
@@ -0,0 +1,12 @@
+---
+description: The Prefect Python SDK enables you to interact programmatically with Prefect's API
+tags:
+  - API
+  - Python SDK
+---
+
+# Python SDK
+
+The Prefect Python SDK is used to build, test, and execute workflows against the Prefect API.
+
+Explore the SDK documentation [here](https://prefect-python-sdk-docs.netlify.app/).
diff --git a/docs/3.0rc/api-ref/rest-api/cloud-rest-api-reference.mdx b/docs/3.0rc/api-ref/rest-api/cloud-rest-api-reference.mdx
new file mode 100644
index 000000000000..ab442115fbed
--- /dev/null
+++ b/docs/3.0rc/api-ref/rest-api/cloud-rest-api-reference.mdx
@@ -0,0 +1,4 @@
+---
+title: Prefect Cloud REST API Reference
+url: https://app.prefect.cloud/api/docs
+---
diff --git a/docs/3.0rc/api-ref/rest-api/index.mdx b/docs/3.0rc/api-ref/rest-api/index.mdx
new file mode 100644
index 000000000000..e3401aa6ed58
--- /dev/null
+++ b/docs/3.0rc/api-ref/rest-api/index.mdx
@@ -0,0 +1,219 @@
+---
+description: Prefect REST API for interacting with the orchestration engine and Prefect Cloud.
+tags:
+  - REST API
+  - Prefect Cloud
+  - Prefect server
+  - curl
+  - PrefectClient
+  - Requests
+  - API reference
+---
+
+# REST API
+
+The Prefect REST API is used for communicating data from clients to a Prefect server so that orchestration can be performed. This API is consumed by clients such as the Prefect Python SDK or the server dashboard.
+
+Prefect Cloud and a locally hosted Prefect server each provide a REST API.
+
+- Prefect Cloud:
+  - [Interactive Prefect Cloud REST API documentation](https://app.prefect.cloud/api/docs)
+  - [Finding your Prefect Cloud details](#finding-your-prefect-cloud-details)
+- A self-hosted server instance:
+  - Interactive REST API documentation for a self-hosted Prefect server instance is available under **Server API** in the sidebar navigation, at `http://localhost:4200/docs`, or at the `/docs` endpoint of the [PREFECT_API_URL](/3.0rc/concepts/settings/#prefect_api_url) you have configured to access the server. You must have the server running with `prefect server start` to access the interactive documentation.
+
+## Interacting with the REST API
+
+You can interact with the Prefect REST API in several ways:
+
+- Create an instance of [`PrefectClient`](/3.0rc/api-ref/prefect/client/orchestration/#prefect.client.orchestration.PrefectClient)
+- Use your favorite Python HTTP library such as [Requests](https://requests.readthedocs.io/en/latest/) or [HTTPX](https://www.python-httpx.org/)
+- Use an HTTP library in your language of choice
+- Use [curl](https://curl.se/) from the command line
+
+### PrefectClient with a Prefect server
+This example uses `PrefectClient` with a locally hosted Prefect server:
+
+```python
+import asyncio
+from prefect.client import get_client
+
+async def get_flows():
+    client = get_client()
+    r = await client.read_flows(limit=5)
+    return r
+
+if __name__ == "__main__":
+    r = asyncio.run(get_flows())
+
+    for flow in r:
+        print(flow.name, flow.id)
+```
+
+Output:
+```bash +cat-facts 58ed68b1-0201-4f37-adef-0ea24bd2a022 +dog-facts e7c0403d-44e7-45cf-a6c8-79117b7f3766 +sloth-facts 771c0574-f5bf-4f59-a69d-3be3e061a62d +capybara-facts fbadaf8b-584f-48b9-b092-07d351edd424 +lemur-facts 53f710e7-3b0f-4b2f-ab6b-44934111818c +``` +
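+`get_client` can also be used as an async context manager, which closes the client's HTTP resources for you when the block exits. The same query in that style:
+
+```python
+import asyncio
+from prefect.client import get_client
+
+async def get_flows():
+    # The context manager cleans up the underlying HTTP client on exit
+    async with get_client() as client:
+        return await client.read_flows(limit=5)
+
+if __name__ == "__main__":
+    for flow in asyncio.run(get_flows()):
+        print(flow.name, flow.id)
+```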
+
+### Requests with Prefect Cloud
+
+This example uses the Requests library with Prefect Cloud to return the five newest artifacts.
+
+```python
+import requests
+
+PREFECT_API_URL="https://api.prefect.cloud/api/accounts/abc-my-cloud-account-id-is-here/workspaces/123-my-workspace-id-is-here"
+PREFECT_API_KEY="123abc_my_api_key_goes_here"
+data = {
+    "sort": "CREATED_DESC",
+    "limit": 5,
+    "artifacts": {
+        "key": {
+            "exists_": True
+        }
+    }
+}
+
+headers = {"Authorization": f"Bearer {PREFECT_API_KEY}"}
+endpoint = f"{PREFECT_API_URL}/artifacts/filter"
+
+response = requests.post(endpoint, headers=headers, json=data)
+assert response.status_code == 200
+for artifact in response.json():
+    print(artifact)
+```
+
+### curl with Prefect Cloud
+
+This example uses curl with Prefect Cloud to create a flow run:
+
+```bash
+ACCOUNT_ID="abc-my-cloud-account-id-goes-here"
+WORKSPACE_ID="123-my-workspace-id-goes-here"
+PREFECT_API_URL="https://api.prefect.cloud/api/accounts/$ACCOUNT_ID/workspaces/$WORKSPACE_ID"
+PREFECT_API_KEY="123abc_my_api_key_goes_here"
+DEPLOYMENT_ID="my_deployment_id"
+
+curl --location --request POST "$PREFECT_API_URL/deployments/$DEPLOYMENT_ID/create_flow_run" \
+  --header "Content-Type: application/json" \
+  --header "Authorization: Bearer $PREFECT_API_KEY" \
+  --header "X-PREFECT-API-VERSION: 0.8.4" \
+  --data-raw "{}"
+```
+
+Note that in this example `--data-raw "{}"` is required and is where you can specify other aspects of the flow run such as the state. Windows users should substitute `^` for `\` in multi-line commands.
+
+
+## Finding your Prefect Cloud details
+
+When working with the Prefect Cloud REST API, you will need your Account ID and often the Workspace ID for the [workspace](/3.0rc/cloud/workspaces/) you want to interact with. You can find both IDs for a [Prefect profile](/3.0rc/concepts/settings/) in the CLI with `prefect profile inspect my_profile`. This command will also display your [Prefect API key](/3.0rc/cloud/users/api-keys/), as shown below:
+```bash +PREFECT_API_URL='https://api.prefect.cloud/api/accounts/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here' +PREFECT_API_KEY='123abc_my_api_key_is_here' +``` +
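+If you need the IDs themselves, for example to build REST URLs like the ones above, you can also parse them out of `PREFECT_API_URL`. A minimal sketch (using the placeholder URL shown above):
+
+```python
+api_url = "https://api.prefect.cloud/api/accounts/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here"
+
+# URL shape: .../accounts/<account_id>/workspaces/<workspace_id>
+parts = api_url.rstrip("/").split("/")
+account_id = parts[parts.index("accounts") + 1]
+workspace_id = parts[parts.index("workspaces") + 1]
+print(account_id, workspace_id)
+```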
+
+Alternatively, view your Account ID and Workspace ID in your browser URL. For example: `https://app.prefect.cloud/account/abc-my-account-id-is-here/workspaces/123-my-workspace-id-is-here`.
+
+
+## REST Guidelines
+
+The REST APIs adhere to the following guidelines:
+
+- Collection names are pluralized (for example, `/flows` or `/runs`).
+- We indicate variable placeholders with colons: `GET /flows/:id`.
+- We use snake case for route names: `GET /task_runs`.
+- We avoid nested resources unless there is no possibility of accessing the child resource outside the parent context. For example, we query `/task_runs` with a flow run filter instead of accessing `/flow_runs/:id/task_runs`.
+- The API is hosted with an `/api/:version` prefix that (optionally) allows versioning in the future. By convention, we treat that as part of the base URL and do not include that in API examples.
+- Filtering, sorting, and pagination parameters are provided in the request body of `POST` requests where applicable.
+  - Pagination parameters are `limit` and `offset`.
+  - Sorting is specified with a single `sort` parameter.
+  - See more information on [filtering](#filtering) below.
+
+### HTTP verbs
+
+- `GET`, `PUT` and `DELETE` requests are always idempotent. `POST` and `PATCH` are not guaranteed to be idempotent.
+- `GET` requests cannot receive information from the request body.
+- `POST` requests can receive information from the request body.
+- `POST /collection` creates a new member of the collection.
+- `GET /collection` lists all members of the collection.
+- `GET /collection/:id` gets a specific member of the collection by ID.
+- `DELETE /collection/:id` deletes a specific member of the collection.
+- `PUT /collection/:id` creates or replaces a specific member of the collection.
+- `PATCH /collection/:id` partially updates a specific member of the collection.
+- `POST /collection/action` is how we implement non-CRUD actions. For example, to set a flow run's state, we use `POST /flow_runs/:id/set_state`.
+- `POST /collection/action` may also be used for read-only queries. This is to allow us to send complex arguments as body arguments (which often cannot be done via `GET`). Examples include `POST /flow_runs/filter`, `POST /flow_runs/count`, and `POST /flow_runs/history`.
+
+## Filtering
+
+Objects can be filtered by providing filter criteria in the body of a `POST` request. When multiple criteria are specified, logical AND will be applied to the criteria.
+
+Filter criteria are structured as follows:
+
+```json
+{
+    "objects": {
+        "object_field": {
+            "field_operator_": <field_value>
+        }
+    }
+}
+```
+
+In this example, `objects` is the name of the collection to filter over (for example, `flows`). The collection can be either the object being queried for (`flows` for `POST /flows/filter`) or a related object (`flow_runs` for `POST /flows/filter`).
+
+`object_field` is the name of the field over which to filter (`name` for `flows`). Note that some objects may have nested object fields, such as `{flow_run: {state: {type: {any_: []}}}}`.
+
+`field_operator_` is the operator to apply to a field when filtering. Common examples include:
+
+- `any_`: return objects where this field matches any of the following values.
+- `is_null_`: return objects where this field is or is not null.
+- `eq_`: return objects where this field is equal to the following value.
+- `all_`: return objects where this field matches all of the following values.
+- `before_`: return objects where this datetime field is less than or equal to the following value. +- `after_`: return objects where this datetime field is greater than or equal to the following value. + +For example, to query for flows with the tag `"database"` and failed flow runs, `POST /flows/filter` with the following request body: + +```json +{ + "flows": { + "tags": { + "all_": ["database"] + } + }, + "flow_runs": { + "state": { + "type": { + "any_": ["FAILED"] + } + } + } +} +``` + +## OpenAPI + +The Prefect REST API can be fully described with an OpenAPI 3.0 compliant document. [OpenAPI](https://swagger.io/docs/specification/about/) is a standard specification for describing REST APIs. + +To generate the Prefect server's complete OpenAPI document, run the following commands in an interactive Python session: + +```python +from prefect.server.api.server import create_app + +app = create_app() +openapi_doc = app.openapi() +``` + +This document allows you to generate your own API client, explore the API using an API inspection tool, or write tests to ensure API compliance. diff --git a/docs/3.0rc/api-ref/search.js b/docs/3.0rc/api-ref/search.js new file mode 100644 index 000000000000..664bd5691e8f --- /dev/null +++ b/docs/3.0rc/api-ref/search.js @@ -0,0 +1,48 @@ +window.pdocSearch = (function () { +/** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function () { + function e(e) { if (null === e || "object" != typeof e) return e; var t = e.constructor(); for (var n in e) e.hasOwnProperty(n) && (t[n] = e[n]); return t } var t = function (e) { var n = new t.Index; return n.pipeline.add(t.trimmer, t.stopWordFilter, t.stemmer), e && e.call(n, n), n }; t.version = "0.9.5", lunr = t, t.utils = {}, t.utils.warn = function (e) { return function (t) { e.console && console.warn && console.warn(t) } }(this), t.utils.toString = function (e) { return void 0 === e || null === e ? "" : e.toString() }, t.EventEmitter = function () { this.events = {} }, t.EventEmitter.prototype.addListener = function () { var e = Array.prototype.slice.call(arguments), t = e.pop(), n = e; if ("function" != typeof t) throw new TypeError("last argument must be a function"); n.forEach(function (e) { this.hasHandler(e) || (this.events[e] = []), this.events[e].push(t) }, this) }, t.EventEmitter.prototype.removeListener = function (e, t) { if (this.hasHandler(e)) { var n = this.events[e].indexOf(t); -1 !== n && (this.events[e].splice(n, 1), 0 == this.events[e].length && delete this.events[e]) } }, t.EventEmitter.prototype.emit = function (e) { if (this.hasHandler(e)) { var t = Array.prototype.slice.call(arguments, 1); this.events[e].forEach(function (e) { e.apply(void 0, t) }, this) } }, t.EventEmitter.prototype.hasHandler = function (e) { return e in this.events }, t.tokenizer = function (e) { if (!arguments.length || null === e || void 0 === e) return []; if (Array.isArray(e)) { var n = e.filter(function (e) { return null === e || void 0 === e ? 
!1 : !0 }); n = n.map(function (e) { return t.utils.toString(e).toLowerCase() }); var i = []; return n.forEach(function (e) { var n = e.split(t.tokenizer.seperator); i = i.concat(n) }, this), i } return e.toString().trim().toLowerCase().split(t.tokenizer.seperator) }, t.tokenizer.defaultSeperator = /[\s\-]+/, t.tokenizer.seperator = t.tokenizer.defaultSeperator, t.tokenizer.setSeperator = function (e) { null !== e && void 0 !== e && "object" == typeof e && (t.tokenizer.seperator = e) }, t.tokenizer.resetSeperator = function () { t.tokenizer.seperator = t.tokenizer.defaultSeperator }, t.tokenizer.getSeperator = function () { return t.tokenizer.seperator }, t.Pipeline = function () { this._queue = [] }, t.Pipeline.registeredFunctions = {}, t.Pipeline.registerFunction = function (e, n) { n in t.Pipeline.registeredFunctions && t.utils.warn("Overwriting existing registered function: " + n), e.label = n, t.Pipeline.registeredFunctions[n] = e }, t.Pipeline.getRegisteredFunction = function (e) { return e in t.Pipeline.registeredFunctions != !0 ? null : t.Pipeline.registeredFunctions[e] }, t.Pipeline.warnIfFunctionNotRegistered = function (e) { var n = e.label && e.label in this.registeredFunctions; n || t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n", e) }, t.Pipeline.load = function (e) { var n = new t.Pipeline; return e.forEach(function (e) { var i = t.Pipeline.getRegisteredFunction(e); if (!i) throw new Error("Cannot load un-registered function: " + e); n.add(i) }), n }, t.Pipeline.prototype.add = function () { var e = Array.prototype.slice.call(arguments); e.forEach(function (e) { t.Pipeline.warnIfFunctionNotRegistered(e), this._queue.push(e) }, this) }, t.Pipeline.prototype.after = function (e, n) { t.Pipeline.warnIfFunctionNotRegistered(n); var i = this._queue.indexOf(e); if (-1 === i) throw new Error("Cannot find existingFn"); this._queue.splice(i + 1, 0, n) }, t.Pipeline.prototype.before = function (e, n) { t.Pipeline.warnIfFunctionNotRegistered(n); var i = this._queue.indexOf(e); if (-1 === i) throw new Error("Cannot find existingFn"); this._queue.splice(i, 0, n) }, t.Pipeline.prototype.remove = function (e) { var t = this._queue.indexOf(e); -1 !== t && this._queue.splice(t, 1) }, t.Pipeline.prototype.run = function (e) { for (var t = [], n = e.length, i = this._queue.length, o = 0; n > o; o++) { for (var r = e[o], s = 0; i > s && (r = this._queue[s](r, o, e), void 0 !== r && null !== r); s++); void 0 !== r && null !== r && t.push(r) } return t }, t.Pipeline.prototype.reset = function () { this._queue = [] }, t.Pipeline.prototype.get = function () { return this._queue }, t.Pipeline.prototype.toJSON = function () { return this._queue.map(function (e) { return t.Pipeline.warnIfFunctionNotRegistered(e), e.label }) }, t.Index = function () { this._fields = [], this._ref = "id", this.pipeline = new t.Pipeline, this.documentStore = new t.DocumentStore, this.index = {}, this.eventEmitter = new t.EventEmitter, this._idfCache = {}, this.on("add", "remove", "update", function () { this._idfCache = {} }.bind(this)) }, t.Index.prototype.on = function () { var e = Array.prototype.slice.call(arguments); return this.eventEmitter.addListener.apply(this.eventEmitter, e) }, t.Index.prototype.off = function (e, t) { return this.eventEmitter.removeListener(e, t) }, t.Index.load = function (e) { e.version !== t.version && t.utils.warn("version mismatch: current " + t.version + " importing " + e.version); var n = new this; n._fields = e.fields, 
n._ref = e.ref, n.documentStore = t.DocumentStore.load(e.documentStore), n.pipeline = t.Pipeline.load(e.pipeline), n.index = {}; for (var i in e.index) n.index[i] = t.InvertedIndex.load(e.index[i]); return n }, t.Index.prototype.addField = function (e) { return this._fields.push(e), this.index[e] = new t.InvertedIndex, this }, t.Index.prototype.setRef = function (e) { return this._ref = e, this }, t.Index.prototype.saveDocument = function (e) { return this.documentStore = new t.DocumentStore(e), this }, t.Index.prototype.addDoc = function (e, n) { if (e) { var n = void 0 === n ? !0 : n, i = e[this._ref]; this.documentStore.addDoc(i, e), this._fields.forEach(function (n) { var o = this.pipeline.run(t.tokenizer(e[n])); this.documentStore.addFieldLength(i, n, o.length); var r = {}; o.forEach(function (e) { e in r ? r[e] += 1 : r[e] = 1 }, this); for (var s in r) { var u = r[s]; u = Math.sqrt(u), this.index[n].addToken(s, { ref: i, tf: u }) } }, this), n && this.eventEmitter.emit("add", e, this) } }, t.Index.prototype.removeDocByRef = function (e) { if (e && this.documentStore.isDocStored() !== !1 && this.documentStore.hasDoc(e)) { var t = this.documentStore.getDoc(e); this.removeDoc(t, !1) } }, t.Index.prototype.removeDoc = function (e, n) { if (e) { var n = void 0 === n ? !0 : n, i = e[this._ref]; this.documentStore.hasDoc(i) && (this.documentStore.removeDoc(i), this._fields.forEach(function (n) { var o = this.pipeline.run(t.tokenizer(e[n])); o.forEach(function (e) { this.index[n].removeToken(e, i) }, this) }, this), n && this.eventEmitter.emit("remove", e, this)) } }, t.Index.prototype.updateDoc = function (e, t) { var t = void 0 === t ? !0 : t; this.removeDocByRef(e[this._ref], !1), this.addDoc(e, !1), t && this.eventEmitter.emit("update", e, this) }, t.Index.prototype.idf = function (e, t) { var n = "@" + t + "/" + e; if (Object.prototype.hasOwnProperty.call(this._idfCache, n)) return this._idfCache[n]; var i = this.index[t].getDocFreq(e), o = 1 + Math.log(this.documentStore.length / (i + 1)); return this._idfCache[n] = o, o }, t.Index.prototype.getFields = function () { return this._fields.slice() }, t.Index.prototype.search = function (e, n) { if (!e) return []; e = "string" == typeof e ? { any: e } : JSON.parse(JSON.stringify(e)); var i = null; null != n && (i = JSON.stringify(n)); for (var o = new t.Configuration(i, this.getFields()).get(), r = {}, s = Object.keys(e), u = 0; u < s.length; u++) { var a = s[u]; r[a] = this.pipeline.run(t.tokenizer(e[a])) } var l = {}; for (var c in o) { var d = r[c] || r.any; if (d) { var f = this.fieldSearch(d, c, o), h = o[c].boost; for (var p in f) f[p] = f[p] * h; for (var p in f) p in l ? l[p] += f[p] : l[p] = f[p] } } var v, g = []; for (var p in l) v = { ref: p, score: l[p] }, this.documentStore.hasDoc(p) && (v.doc = this.documentStore.getDoc(p)), g.push(v); return g.sort(function (e, t) { return t.score - e.score }), g }, t.Index.prototype.fieldSearch = function (e, t, n) { var i = n[t].bool, o = n[t].expand, r = n[t].boost, s = null, u = {}; return 0 !== r ? 
(e.forEach(function (e) { var n = [e]; 1 == o && (n = this.index[t].expandToken(e)); var r = {}; n.forEach(function (n) { var o = this.index[t].getDocs(n), a = this.idf(n, t); if (s && "AND" == i) { var l = {}; for (var c in s) c in o && (l[c] = o[c]); o = l } n == e && this.fieldSearchStats(u, n, o); for (var c in o) { var d = this.index[t].getTermFrequency(n, c), f = this.documentStore.getFieldLength(c, t), h = 1; 0 != f && (h = 1 / Math.sqrt(f)); var p = 1; n != e && (p = .15 * (1 - (n.length - e.length) / n.length)); var v = d * a * h * p; c in r ? r[c] += v : r[c] = v } }, this), s = this.mergeScores(s, r, i) }, this), s = this.coordNorm(s, u, e.length)) : void 0 }, t.Index.prototype.mergeScores = function (e, t, n) { if (!e) return t; if ("AND" == n) { var i = {}; for (var o in t) o in e && (i[o] = e[o] + t[o]); return i } for (var o in t) o in e ? e[o] += t[o] : e[o] = t[o]; return e }, t.Index.prototype.fieldSearchStats = function (e, t, n) { for (var i in n) i in e ? e[i].push(t) : e[i] = [t] }, t.Index.prototype.coordNorm = function (e, t, n) { for (var i in e) if (i in t) { var o = t[i].length; e[i] = e[i] * o / n } return e }, t.Index.prototype.toJSON = function () { var e = {}; return this._fields.forEach(function (t) { e[t] = this.index[t].toJSON() }, this), { version: t.version, fields: this._fields, ref: this._ref, documentStore: this.documentStore.toJSON(), index: e, pipeline: this.pipeline.toJSON() } }, t.Index.prototype.use = function (e) { var t = Array.prototype.slice.call(arguments, 1); t.unshift(this), e.apply(this, t) }, t.DocumentStore = function (e) { this._save = null === e || void 0 === e ? !0 : e, this.docs = {}, this.docInfo = {}, this.length = 0 }, t.DocumentStore.load = function (e) { var t = new this; return t.length = e.length, t.docs = e.docs, t.docInfo = e.docInfo, t._save = e.save, t }, t.DocumentStore.prototype.isDocStored = function () { return this._save }, t.DocumentStore.prototype.addDoc = function (t, n) { this.hasDoc(t) || this.length++, this.docs[t] = this._save === !0 ? e(n) : null }, t.DocumentStore.prototype.getDoc = function (e) { return this.hasDoc(e) === !1 ? null : this.docs[e] }, t.DocumentStore.prototype.hasDoc = function (e) { return e in this.docs }, t.DocumentStore.prototype.removeDoc = function (e) { this.hasDoc(e) && (delete this.docs[e], delete this.docInfo[e], this.length--) }, t.DocumentStore.prototype.addFieldLength = function (e, t, n) { null !== e && void 0 !== e && 0 != this.hasDoc(e) && (this.docInfo[e] || (this.docInfo[e] = {}), this.docInfo[e][t] = n) }, t.DocumentStore.prototype.updateFieldLength = function (e, t, n) { null !== e && void 0 !== e && 0 != this.hasDoc(e) && this.addFieldLength(e, t, n) }, t.DocumentStore.prototype.getFieldLength = function (e, t) { return null === e || void 0 === e ? 0 : e in this.docs && t in this.docInfo[e] ? this.docInfo[e][t] : 0 }, t.DocumentStore.prototype.toJSON = function () { return { docs: this.docs, docInfo: this.docInfo, length: this.length, save: this._save } }, t.stemmer = function () { var e = { ational: "ate", tional: "tion", enci: "ence", anci: "ance", izer: "ize", bli: "ble", alli: "al", entli: "ent", eli: "e", ousli: "ous", ization: "ize", ation: "ate", ator: "ate", alism: "al", iveness: "ive", fulness: "ful", ousness: "ous", aliti: "al", iviti: "ive", biliti: "ble", logi: "log" }, t = { icate: "ic", ative: "", alize: "al", iciti: "ic", ical: "ic", ful: "", ness: "" }, n = "[^aeiou]", i = "[aeiouy]", o = n + "[^aeiouy]*", r = i + "[aeiou]*", s = "^(" + o + ")?" 
+ r + o, u = "^(" + o + ")?" + r + o + "(" + r + ")?$", a = "^(" + o + ")?" + r + o + r + o, l = "^(" + o + ")?" + i, c = new RegExp(s), d = new RegExp(a), f = new RegExp(u), h = new RegExp(l), p = /^(.+?)(ss|i)es$/, v = /^(.+?)([^s])s$/, g = /^(.+?)eed$/, m = /^(.+?)(ed|ing)$/, y = /.$/, S = /(at|bl|iz)$/, x = new RegExp("([^aeiouylsz])\\1$"), w = new RegExp("^" + o + i + "[^aeiouwxy]$"), I = /^(.+?[^aeiou])y$/, b = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/, E = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/, D = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/, F = /^(.+?)(s|t)(ion)$/, _ = /^(.+?)e$/, P = /ll$/, k = new RegExp("^" + o + i + "[^aeiouwxy]$"), z = function (n) { var i, o, r, s, u, a, l; if (n.length < 3) return n; if (r = n.substr(0, 1), "y" == r && (n = r.toUpperCase() + n.substr(1)), s = p, u = v, s.test(n) ? n = n.replace(s, "$1$2") : u.test(n) && (n = n.replace(u, "$1$2")), s = g, u = m, s.test(n)) { var z = s.exec(n); s = c, s.test(z[1]) && (s = y, n = n.replace(s, "")) } else if (u.test(n)) { var z = u.exec(n); i = z[1], u = h, u.test(i) && (n = i, u = S, a = x, l = w, u.test(n) ? n += "e" : a.test(n) ? (s = y, n = n.replace(s, "")) : l.test(n) && (n += "e")) } if (s = I, s.test(n)) { var z = s.exec(n); i = z[1], n = i + "i" } if (s = b, s.test(n)) { var z = s.exec(n); i = z[1], o = z[2], s = c, s.test(i) && (n = i + e[o]) } if (s = E, s.test(n)) { var z = s.exec(n); i = z[1], o = z[2], s = c, s.test(i) && (n = i + t[o]) } if (s = D, u = F, s.test(n)) { var z = s.exec(n); i = z[1], s = d, s.test(i) && (n = i) } else if (u.test(n)) { var z = u.exec(n); i = z[1] + z[2], u = d, u.test(i) && (n = i) } if (s = _, s.test(n)) { var z = s.exec(n); i = z[1], s = d, u = f, a = k, (s.test(i) || u.test(i) && !a.test(i)) && (n = i) } return s = P, u = d, s.test(n) && u.test(n) && (s = y, n = n.replace(s, "")), "y" == r && (n = r.toLowerCase() + n.substr(1)), n }; return z }(), t.Pipeline.registerFunction(t.stemmer, "stemmer"), t.stopWordFilter = function (e) { return e && t.stopWordFilter.stopWords[e] !== !0 ? 
e : void 0 }, t.clearStopWords = function () { t.stopWordFilter.stopWords = {} }, t.addStopWords = function (e) { null != e && Array.isArray(e) !== !1 && e.forEach(function (e) { t.stopWordFilter.stopWords[e] = !0 }, this) }, t.resetStopWords = function () { t.stopWordFilter.stopWords = t.defaultStopWords }, t.defaultStopWords = { "": !0, a: !0, able: !0, about: !0, across: !0, after: !0, all: !0, almost: !0, also: !0, am: !0, among: !0, an: !0, and: !0, any: !0, are: !0, as: !0, at: !0, be: !0, because: !0, been: !0, but: !0, by: !0, can: !0, cannot: !0, could: !0, dear: !0, did: !0, "do": !0, does: !0, either: !0, "else": !0, ever: !0, every: !0, "for": !0, from: !0, get: !0, got: !0, had: !0, has: !0, have: !0, he: !0, her: !0, hers: !0, him: !0, his: !0, how: !0, however: !0, i: !0, "if": !0, "in": !0, into: !0, is: !0, it: !0, its: !0, just: !0, least: !0, let: !0, like: !0, likely: !0, may: !0, me: !0, might: !0, most: !0, must: !0, my: !0, neither: !0, no: !0, nor: !0, not: !0, of: !0, off: !0, often: !0, on: !0, only: !0, or: !0, other: !0, our: !0, own: !0, rather: !0, said: !0, say: !0, says: !0, she: !0, should: !0, since: !0, so: !0, some: !0, than: !0, that: !0, the: !0, their: !0, them: !0, then: !0, there: !0, these: !0, they: !0, "this": !0, tis: !0, to: !0, too: !0, twas: !0, us: !0, wants: !0, was: !0, we: !0, were: !0, what: !0, when: !0, where: !0, which: !0, "while": !0, who: !0, whom: !0, why: !0, will: !0, "with": !0, would: !0, yet: !0, you: !0, your: !0 }, t.stopWordFilter.stopWords = t.defaultStopWords, t.Pipeline.registerFunction(t.stopWordFilter, "stopWordFilter"), t.trimmer = function (e) {
+ if (null === e || void 0 === e) throw new Error("token should not be undefined"); return e.replace(/^\W+/, "").replace(/\W+$/, "") }, t.Pipeline.registerFunction(t.trimmer, "trimmer"), t.InvertedIndex = function () { this.root = { docs: {}, df: 0 } }, t.InvertedIndex.load = function (e) { var t = new this; return t.root = e.root, t }, t.InvertedIndex.prototype.addToken = function (e, t, n) { for (var n = n || this.root, i = 0; i <= e.length - 1;) { var o = e[i]; o in n || (n[o] = { docs: {}, df: 0 }), i += 1, n = n[o] } var r = t.ref; n.docs[r] ? n.docs[r] = { tf: t.tf } : (n.docs[r] = { tf: t.tf }, n.df += 1) }, t.InvertedIndex.prototype.hasToken = function (e) { if (!e) return !1; for (var t = this.root, n = 0; n < e.length; n++) { if (!t[e[n]]) return !1; t = t[e[n]] } return !0 }, t.InvertedIndex.prototype.getNode = function (e) { if (!e) return null; for (var t = this.root, n = 0; n < e.length; n++) { if (!t[e[n]]) return null; t = t[e[n]] } return t }, t.InvertedIndex.prototype.getDocs = function (e) { var t = this.getNode(e); return null == t ? {} : t.docs }, t.InvertedIndex.prototype.getTermFrequency = function (e, t) { var n = this.getNode(e); return null == n ? 0 : t in n.docs ? n.docs[t].tf : 0 }, t.InvertedIndex.prototype.getDocFreq = function (e) { var t = this.getNode(e); return null == t ?
0 : t.df }, t.InvertedIndex.prototype.removeToken = function (e, t) { if (e) { var n = this.getNode(e); null != n && t in n.docs && (delete n.docs[t], n.df -= 1) } }, t.InvertedIndex.prototype.expandToken = function (e, t, n) { if (null == e || "" == e) return []; var t = t || []; if (void 0 == n && (n = this.getNode(e), null == n)) return t; n.df > 0 && t.push(e); for (var i in n) "docs" !== i && "df" !== i && this.expandToken(e + i, t, n[i]); return t }, t.InvertedIndex.prototype.toJSON = function () { return { root: this.root } }, t.Configuration = function (e, n) { var e = e || ""; if (void 0 == n || null == n) throw new Error("fields should not be null"); this.config = {}; var i; try { i = JSON.parse(e), this.buildUserConfig(i, n) } catch (o) { t.utils.warn("user configuration parse failed, will use default configuration"), this.buildDefaultConfig(n) } }, t.Configuration.prototype.buildDefaultConfig = function (e) { this.reset(), e.forEach(function (e) { this.config[e] = { boost: 1, bool: "OR", expand: !1 } }, this) }, t.Configuration.prototype.buildUserConfig = function (e, n) { var i = "OR", o = !1; if (this.reset(), "bool" in e && (i = e.bool || i), "expand" in e && (o = e.expand || o), "fields" in e) for (var r in e.fields) if (n.indexOf(r) > -1) { var s = e.fields[r], u = o; void 0 != s.expand && (u = s.expand), this.config[r] = { boost: s.boost || 0 === s.boost ? s.boost : 1, bool: s.bool || i, expand: u } } else t.utils.warn("field name in user configuration not found in index instance fields"); else this.addAllFields2UserConfig(i, o, n) }, t.Configuration.prototype.addAllFields2UserConfig = function (e, t, n) { n.forEach(function (n) { this.config[n] = { boost: 1, bool: e, expand: t } }, this) }, t.Configuration.prototype.get = function () { return this.config }, t.Configuration.prototype.reset = function () { this.config = {} }, lunr.SortedSet = function () { this.length = 0, this.elements = [] }, lunr.SortedSet.load = function (e) { var t = new this; return t.elements = e, t.length = e.length, t }, lunr.SortedSet.prototype.add = function () { var e, t; for (e = 0; e < arguments.length; e++)t = arguments[e], ~this.indexOf(t) || this.elements.splice(this.locationFor(t), 0, t); this.length = this.elements.length }, lunr.SortedSet.prototype.toArray = function () { return this.elements.slice() }, lunr.SortedSet.prototype.map = function (e, t) { return this.elements.map(e, t) }, lunr.SortedSet.prototype.forEach = function (e, t) { return this.elements.forEach(e, t) }, lunr.SortedSet.prototype.indexOf = function (e) { for (var t = 0, n = this.elements.length, i = n - t, o = t + Math.floor(i / 2), r = this.elements[o]; i > 1;) { if (r === e) return o; e > r && (t = o), r > e && (n = o), i = n - t, o = t + Math.floor(i / 2), r = this.elements[o] } return r === e ? o : -1 }, lunr.SortedSet.prototype.locationFor = function (e) { for (var t = 0, n = this.elements.length, i = n - t, o = t + Math.floor(i / 2), r = this.elements[o]; i > 1;)e > r && (t = o), r > e && (n = o), i = n - t, o = t + Math.floor(i / 2), r = this.elements[o]; return r > e ? o : e > r ? o + 1 : void 0 }, lunr.SortedSet.prototype.intersect = function (e) { for (var t = new lunr.SortedSet, n = 0, i = 0, o = this.length, r = e.length, s = this.elements, u = e.elements; ;) { if (n > o - 1 || i > r - 1) break; s[n] !== u[i] ? s[n] < u[i] ? 
n++ : s[n] > u[i] && i++ : (t.add(s[n]), n++, i++) } return t }, lunr.SortedSet.prototype.clone = function () { var e = new lunr.SortedSet; return e.elements = this.toArray(), e.length = e.elements.length, e }, lunr.SortedSet.prototype.union = function (e) { var t, n, i; this.length >= e.length ? (t = this, n = e) : (t = e, n = this), i = t.clone(); for (var o = 0, r = n.toArray(); o < r.length; o++)i.add(r[o]); return i }, lunr.SortedSet.prototype.toJSON = function () { return this.toArray() }, function (e, t) { "function" == typeof define && define.amd ? define(t) : "object" == typeof exports ? module.exports = t() : e.elasticlunr = t() }(this, function () { return t }) }(); + /** pdoc search index */const docs = { "version": "0.9.5", "fields": ["qualname", "fullname", "annotation", "default_value", "signature", "bases", "doc"], "ref": "fullname", "documentStore": { "docs": { "prefect": { "fullname": "prefect", "modulename": "prefect", "kind": "module", "doc": "

\n" }, "prefect.allow_failure": { "fullname": "prefect.allow_failure", "modulename": "prefect", "qualname": "allow_failure", "kind": "class", "doc": "

Wrapper for states or futures.

\n\n

Indicates that the upstream run for this input can be failed.

\n\n

Generally, Prefect will not allow a downstream run to start if any of its inputs\nare failed. This annotation allows you to opt into receiving a failed input\ndownstream.

\n\n

If the input is from a failed run, the attached exception will be passed to your\nfunction.

\n", "bases": "prefect.utilities.annotations.BaseAnnotation[~T]" }, "prefect.allow_failure.__init__": { "fullname": "prefect.allow_failure.__init__", "modulename": "prefect", "qualname": "allow_failure.__init__", "kind": "function", "doc": "

Create new instance of BaseAnnotation(value,)

\n", "signature": "(value)" }, "prefect.flow": { "fullname": "prefect.flow", "modulename": "prefect", "qualname": "flow", "kind": "function", "doc": "

Decorator to designate a function as a Prefect workflow.

\n\n

This decorator may be used for asynchronous or synchronous functions.

\n\n

Flow parameters must be serializable by Pydantic.

\n\n

Args:\n name: An optional name for the flow; if not provided, the name will be inferred\n from the given function.\n version: An optional version string for the flow; if not provided, we will\n attempt to create a version string as a hash of the file containing the\n wrapped function; if the file cannot be located, the version will be null.\n flow_run_name: An optional name to distinguish runs of this flow; this name can\n be provided as a string template with the flow's parameters as variables,\n or a function that returns a string.\n retries: An optional number of times to retry on flow run failure.\n retry_delay_seconds: An optional number of seconds to wait before retrying the\n flow after failure. This is only applicable if retries is nonzero.\n task_runner: An optional task runner to use for task execution within the flow; if\n not provided, a ConcurrentTaskRunner will be instantiated.\n description: An optional string description for the flow; if not provided, the\n description will be pulled from the docstring for the decorated function.\n timeout_seconds: An optional number of seconds indicating a maximum runtime for\n the flow. If the flow exceeds this runtime, it will be marked as failed.\n Flow execution may continue until the next task is called.\n validate_parameters: By default, parameters passed to flows are validated by\n Pydantic. This will check that input values conform to the annotated types\n on the function. Where possible, values will be coerced into the correct\n type; for example, if a parameter is defined as x: int and \"5\" is passed,\n it will be resolved to 5. If set to False, no validation will be\n performed on flow parameters.\n persist_result: An optional toggle indicating whether the result of this flow\n should be persisted to result storage. Defaults to None, which indicates\n that Prefect should choose whether the result should be persisted depending on\n the features being used.\n result_storage: An optional block to use to persist the result of this flow.\n This value will be used as the default for any tasks in this flow.\n If not provided, the local file system will be used unless called as\n a subflow, at which point the default will be loaded from the parent flow.\n result_serializer: An optional serializer to use to serialize the result of this\n flow for persistence. This value will be used as the default for any tasks\n in this flow. If not provided, the value of PREFECT_RESULTS_DEFAULT_SERIALIZER\n will be used unless called as a subflow, at which point the default will be\n loaded from the parent flow.\n cache_result_in_memory: An optional toggle indicating whether the cached result of\n a running the flow should be stored in memory. Defaults to True.\n log_prints: If set, print statements in the flow will be redirected to the\n Prefect logger for the flow run. Defaults to None, which indicates that\n the value from the parent flow should be used. If this is a parent flow,\n the default is pulled from the PREFECT_LOGGING_LOG_PRINTS setting.\n on_completion: An optional list of functions to call when the flow run is\n completed. Each function should accept three arguments: the flow, the flow\n run, and the final state of the flow run.\n on_failure: An optional list of functions to call when the flow run fails. Each\n function should accept three arguments: the flow, the flow run, and the\n final state of the flow run.\n on_cancellation: An optional list of functions to call when the flow run is\n cancelled. 
These functions will be passed the flow, flow run, and final state.\n on_crashed: An optional list of functions to call when the flow run crashes. Each\n function should accept three arguments: the flow, the flow run, and the\n final state of the flow run.\n on_running: An optional list of functions to call when the flow run is started. Each\n function should accept three arguments: the flow, the flow run, and the current state

\n\n

Returns:\n A callable Flow object which, when called, will run the flow and return its\n final state.

\n\n

Examples:\n Define a simple flow

\n\n
>>> from prefect import flow\n>>> @flow\n>>> def add(x, y):\n>>>     return x + y\n\nDefine an async flow\n\n>>> @flow\n>>> async def add(x, y):\n>>>     return x + y\n\nDefine a flow with a version and description\n\n>>> @flow(version=\"first-flow\", description=\"This flow is empty!\")\n>>> def my_flow():\n>>>     pass\n\nDefine a flow with a custom name\n\n>>> @flow(name=\"The Ultimate Flow\")\n>>> def my_flow():\n>>>     pass\n\nDefine a flow that submits its tasks to dask\n\n>>> from prefect_dask.task_runners import DaskTaskRunner\n>>>\n>>> @flow(task_runner=DaskTaskRunner)\n>>> def my_flow():\n>>>     pass\n
\n", "signature": "(\t__fn=None,\t*,\tname: Optional[str] = None,\tversion: Optional[str] = None,\tflow_run_name: Union[Callable[[], str], str, NoneType] = None,\tretries: int = None,\tretry_delay_seconds: Union[int, float] = None,\ttask_runner: prefect.task_runners.BaseTaskRunner = <class 'prefect.task_runners.ConcurrentTaskRunner'>,\tdescription: str = None,\ttimeout_seconds: Union[int, float] = None,\tvalidate_parameters: bool = True,\tpersist_result: Optional[bool] = None,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = None,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = None,\tcache_result_in_memory: bool = True,\tlog_prints: Optional[bool] = None,\ton_completion: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], Optional[Awaitable[NoneType]]]]] = None,\ton_failure: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], Optional[Awaitable[NoneType]]]]] = None,\ton_cancellation: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_crashed: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_running: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None):", "funcdef": "def" }, "prefect.Flow": { "fullname": "prefect.Flow", "modulename": "prefect", "qualname": "Flow", "kind": "class", "doc": "

A Prefect workflow definition.

\n\n

!!! note\n We recommend using the [@flow decorator][prefect.flows.flow] for most use-cases.

\n\n

Wraps a function with an entrypoint to the Prefect engine. To preserve the input\nand output types, we use the generic type variables P and R for \"Parameters\" and\n\"Returns\" respectively.

\n\n

Args:\n fn: The function defining the workflow.\n name: An optional name for the flow; if not provided, the name will be inferred\n from the given function.\n version: An optional version string for the flow; if not provided, we will\n attempt to create a version string as a hash of the file containing the\n wrapped function; if the file cannot be located, the version will be null.\n flow_run_name: An optional name to distinguish runs of this flow; this name can\n be provided as a string template with the flow's parameters as variables,\n or a function that returns a string.\n task_runner: An optional task runner to use for task execution within the flow;\n if not provided, a ConcurrentTaskRunner will be used.\n description: An optional string description for the flow; if not provided, the\n description will be pulled from the docstring for the decorated function.\n timeout_seconds: An optional number of seconds indicating a maximum runtime for\n the flow. If the flow exceeds this runtime, it will be marked as failed.\n Flow execution may continue until the next task is called.\n validate_parameters: By default, parameters passed to flows are validated by\n Pydantic. This will check that input values conform to the annotated types\n on the function. Where possible, values will be coerced into the correct\n type; for example, if a parameter is defined as x: int and \"5\" is passed,\n it will be resolved to 5. If set to False, no validation will be\n performed on flow parameters.\n retries: An optional number of times to retry on flow run failure.\n retry_delay_seconds: An optional number of seconds to wait before retrying the\n flow after failure. This is only applicable if retries is nonzero.\n persist_result: An optional toggle indicating whether the result of this flow\n should be persisted to result storage. Defaults to None, which indicates\n that Prefect should choose whether the result should be persisted depending on\n the features being used.\n result_storage: An optional block to use to persist the result of this flow.\n This value will be used as the default for any tasks in this flow.\n If not provided, the local file system will be used unless called as\n a subflow, at which point the default will be loaded from the parent flow.\n result_serializer: An optional serializer to use to serialize the result of this\n flow for persistence. This value will be used as the default for any tasks\n in this flow. If not provided, the value of PREFECT_RESULTS_DEFAULT_SERIALIZER\n will be used unless called as a subflow, at which point the default will be\n loaded from the parent flow.\n on_failure: An optional list of callables to run when the flow enters a failed state.\n on_completion: An optional list of callables to run when the flow enters a completed state.\n on_cancellation: An optional list of callables to run when the flow enters a cancelling state.\n on_crashed: An optional list of callables to run when the flow enters a crashed state.\n on_running: An optional list of callables to run when the flow enters a running state.

\n", "bases": "typing.Generic[~P, ~R]" }, "prefect.Flow.__init__": { "fullname": "prefect.Flow.__init__", "modulename": "prefect", "qualname": "Flow.__init__", "kind": "function", "doc": "

\n", "signature": "(\tfn: Callable[~P, ~R],\tname: Optional[str] = None,\tversion: Optional[str] = None,\tflow_run_name: Union[Callable[[], str], str, NoneType] = None,\tretries: Optional[int] = None,\tretry_delay_seconds: Union[int, float, NoneType] = None,\ttask_runner: Union[Type[prefect.task_runners.BaseTaskRunner], prefect.task_runners.BaseTaskRunner] = <class 'prefect.task_runners.ConcurrentTaskRunner'>,\tdescription: str = None,\ttimeout_seconds: Union[int, float] = None,\tvalidate_parameters: bool = True,\tpersist_result: Optional[bool] = None,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = None,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = None,\tcache_result_in_memory: bool = True,\tlog_prints: Optional[bool] = None,\ton_completion: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_failure: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_cancellation: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_crashed: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_running: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None)" }, "prefect.Flow.name": { "fullname": "prefect.Flow.name", "modulename": "prefect", "qualname": "Flow.name", "kind": "variable", "doc": "

\n" }, "prefect.Flow.flow_run_name": { "fullname": "prefect.Flow.flow_run_name", "modulename": "prefect", "qualname": "Flow.flow_run_name", "kind": "variable", "doc": "

\n" }, "prefect.Flow.task_runner": { "fullname": "prefect.Flow.task_runner", "modulename": "prefect", "qualname": "Flow.task_runner", "kind": "variable", "doc": "

\n" }, "prefect.Flow.log_prints": { "fullname": "prefect.Flow.log_prints", "modulename": "prefect", "qualname": "Flow.log_prints", "kind": "variable", "doc": "

\n" }, "prefect.Flow.description": { "fullname": "prefect.Flow.description", "modulename": "prefect", "qualname": "Flow.description", "kind": "variable", "doc": "

\n" }, "prefect.Flow.fn": { "fullname": "prefect.Flow.fn", "modulename": "prefect", "qualname": "Flow.fn", "kind": "variable", "doc": "

\n" }, "prefect.Flow.isasync": { "fullname": "prefect.Flow.isasync", "modulename": "prefect", "qualname": "Flow.isasync", "kind": "variable", "doc": "

\n" }, "prefect.Flow.version": { "fullname": "prefect.Flow.version", "modulename": "prefect", "qualname": "Flow.version", "kind": "variable", "doc": "

\n" }, "prefect.Flow.timeout_seconds": { "fullname": "prefect.Flow.timeout_seconds", "modulename": "prefect", "qualname": "Flow.timeout_seconds", "kind": "variable", "doc": "

\n" }, "prefect.Flow.retries": { "fullname": "prefect.Flow.retries", "modulename": "prefect", "qualname": "Flow.retries", "kind": "variable", "doc": "

\n" }, "prefect.Flow.retry_delay_seconds": { "fullname": "prefect.Flow.retry_delay_seconds", "modulename": "prefect", "qualname": "Flow.retry_delay_seconds", "kind": "variable", "doc": "

\n" }, "prefect.Flow.parameters": { "fullname": "prefect.Flow.parameters", "modulename": "prefect", "qualname": "Flow.parameters", "kind": "variable", "doc": "

\n" }, "prefect.Flow.should_validate_parameters": { "fullname": "prefect.Flow.should_validate_parameters", "modulename": "prefect", "qualname": "Flow.should_validate_parameters", "kind": "variable", "doc": "

\n" }, "prefect.Flow.persist_result": { "fullname": "prefect.Flow.persist_result", "modulename": "prefect", "qualname": "Flow.persist_result", "kind": "variable", "doc": "

\n" }, "prefect.Flow.result_storage": { "fullname": "prefect.Flow.result_storage", "modulename": "prefect", "qualname": "Flow.result_storage", "kind": "variable", "doc": "

\n" }, "prefect.Flow.result_serializer": { "fullname": "prefect.Flow.result_serializer", "modulename": "prefect", "qualname": "Flow.result_serializer", "kind": "variable", "doc": "

\n" }, "prefect.Flow.cache_result_in_memory": { "fullname": "prefect.Flow.cache_result_in_memory", "modulename": "prefect", "qualname": "Flow.cache_result_in_memory", "kind": "variable", "doc": "

\n" }, "prefect.Flow.on_completion": { "fullname": "prefect.Flow.on_completion", "modulename": "prefect", "qualname": "Flow.on_completion", "kind": "variable", "doc": "

\n" }, "prefect.Flow.on_failure": { "fullname": "prefect.Flow.on_failure", "modulename": "prefect", "qualname": "Flow.on_failure", "kind": "variable", "doc": "

\n" }, "prefect.Flow.on_cancellation": { "fullname": "prefect.Flow.on_cancellation", "modulename": "prefect", "qualname": "Flow.on_cancellation", "kind": "variable", "doc": "

\n" }, "prefect.Flow.on_crashed": { "fullname": "prefect.Flow.on_crashed", "modulename": "prefect", "qualname": "Flow.on_crashed", "kind": "variable", "doc": "

\n" }, "prefect.Flow.on_running": { "fullname": "prefect.Flow.on_running", "modulename": "prefect", "qualname": "Flow.on_running", "kind": "variable", "doc": "

\n" }, "prefect.Flow.with_options": { "fullname": "prefect.Flow.with_options", "modulename": "prefect", "qualname": "Flow.with_options", "kind": "function", "doc": "

Create a new flow from the current object, updating provided options.

\n\n

Args:\n name: A new name for the flow.\n version: A new version for the flow.\n description: A new description for the flow.\n flow_run_name: An optional name to distinguish runs of this flow; this name\n can be provided as a string template with the flow's parameters as variables,\n or a function that returns a string.\n task_runner: A new task runner for the flow.\n timeout_seconds: A new number of seconds to fail the flow after if still\n running.\n validate_parameters: A new value indicating if flow calls should validate\n given parameters.\n retries: A new number of times to retry on flow run failure.\n retry_delay_seconds: A new number of seconds to wait before retrying the\n flow after failure. This is only applicable if retries is nonzero.\n persist_result: A new option for enabling or disabling result persistence.\n result_storage: A new storage type to use for results.\n result_serializer: A new serializer to use for results.\n cache_result_in_memory: A new value indicating if the flow's result should\n be cached in memory.\n on_failure: A new list of callables to run when the flow enters a failed state.\n on_completion: A new list of callables to run when the flow enters a completed state.\n on_cancellation: A new list of callables to run when the flow enters a cancelling state.\n on_crashed: A new list of callables to run when the flow enters a crashed state.\n on_running: A new list of callables to run when the flow enters a running state.

\n\n

Returns:\n A new Flow instance.

\n\n

Examples:

\n\n
Create a new flow from an existing flow and update the name:\n\n>>> @flow(name=\"My flow\")\n>>> def my_flow():\n>>>     return 1\n>>>\n>>> new_flow = my_flow.with_options(name=\"My new flow\")\n\nCreate a new flow from an existing flow, update the task runner, and call\nit without an intermediate variable:\n\n>>> from prefect.task_runners import SequentialTaskRunner\n>>>\n>>> @flow\n>>> def my_flow(x, y):\n>>>     return x + y\n>>>\n>>> state = my_flow.with_options(task_runner=SequentialTaskRunner)(1, 3)\n>>> assert state.result() == 4\n
\n", "signature": "(\tself,\t*,\tname: str = None,\tversion: str = None,\tretries: Optional[int] = None,\tretry_delay_seconds: Union[int, float, NoneType] = None,\tdescription: str = None,\tflow_run_name: Union[Callable[[], str], str, NoneType] = None,\ttask_runner: Union[Type[prefect.task_runners.BaseTaskRunner], prefect.task_runners.BaseTaskRunner] = None,\ttimeout_seconds: Union[int, float] = None,\tvalidate_parameters: bool = None,\tpersist_result: Optional[bool] = <class 'prefect.utilities.annotations.NotSet'>,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = <class 'prefect.utilities.annotations.NotSet'>,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = <class 'prefect.utilities.annotations.NotSet'>,\tcache_result_in_memory: bool = None,\tlog_prints: Optional[bool] = <class 'prefect.utilities.annotations.NotSet'>,\ton_completion: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_failure: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_cancellation: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_crashed: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_running: Optional[List[Callable[[prefect.client.schemas.objects.Flow, prefect.client.schemas.objects.FlowRun, prefect.client.schemas.objects.State], NoneType]]] = None) -> Self:", "funcdef": "def" }, "prefect.Flow.validate_parameters": { "fullname": "prefect.Flow.validate_parameters", "modulename": "prefect", "qualname": "Flow.validate_parameters", "kind": "function", "doc": "

Validate parameters for compatibility with the flow by attempting to cast the inputs to the\nassociated types specified by the function's type annotations.

\n\n

Returns:\n A new dict of parameters that have been cast to the appropriate types

\n\n

Raises:\n ParameterTypeError: if the provided parameters are not valid

\n", "signature": "(self, parameters: Dict[str, Any]) -> Dict[str, Any]:", "funcdef": "def" }, "prefect.Flow.serialize_parameters": { "fullname": "prefect.Flow.serialize_parameters", "modulename": "prefect", "qualname": "Flow.serialize_parameters", "kind": "function", "doc": "

Convert parameters to a serializable form.

\n\n

Uses FastAPI's jsonable_encoder to convert to JSON compatible objects without\nconverting everything directly to a string. This maintains basic types like\nintegers during API roundtrips.

\n", "signature": "(self, parameters: Dict[str, Any]) -> Dict[str, Any]:", "funcdef": "def" }, "prefect.Flow.to_deployment": { "fullname": "prefect.Flow.to_deployment", "modulename": "prefect", "qualname": "Flow.to_deployment", "kind": "function", "doc": "

Creates a runner deployment object for this flow.

\n\n

Args:\n name: The name to give the created deployment.\n interval: An interval on which to execute the new deployment. Accepts either a number\n or a timedelta object. If a number is given, it will be interpreted as seconds.\n cron: A cron schedule of when to execute runs of this deployment.\n rrule: An rrule schedule of when to execute runs of this deployment.\n paused: Whether or not to set this deployment as paused.\n schedules: A list of schedule objects defining when to execute runs of this deployment.\n Used to define multiple schedules or additional scheduling options such as timezone.\n schedule: A schedule object defining when to execute runs of this deployment.\n is_schedule_active: Whether or not to set the schedule for this deployment as active. If\n not provided when creating a deployment, the schedule will be set as active. If not\n provided when updating a deployment, the schedule's activation will not be changed.\n parameters: A dictionary of default parameter values to pass to runs of this deployment.\n triggers: A list of triggers that will kick off runs of this deployment.\n description: A description for the created deployment. Defaults to the flow's\n description if not provided.\n tags: A list of tags to associate with the created deployment for organizational\n purposes.\n version: A version for the created deployment. Defaults to the flow's version.\n enforce_parameter_schema: Whether or not the Prefect API should enforce the\n parameter schema for the created deployment.\n work_pool_name: The name of the work pool to use for this deployment.\n work_queue_name: The name of the work queue to use for this deployment's scheduled runs.\n If not provided the default work queue for the work pool will be used.\n job_variables: Settings used to override the values specified default base job template\n of the chosen work pool. Refer to the base job template of the chosen work pool for\n entrypoint_type: Type of entrypoint to use for the deployment. When using a module path\n entrypoint, ensure that the module will be importable in the execution environment.

\n\n

Examples:\n Prepare two deployments and serve them:\n

\n
from prefect import flow, serve\n 
\n @flow\n def my_flow(name):\n print(f"hello {name}")\n
\n @flow\n def my_other_flow(name):\n print(f"goodbye {name}")\n
\n if __name__ == "__main__":\n hello_deploy = my_flow.to_deployment("hello", tags=["dev"])\n bye_deploy = my_other_flow.to_deployment("goodbye", tags=["dev"])\n serve(hello_deploy, bye_deploy)\n
\n

\n", "signature": "(\tself,\tname: str,\tinterval: Union[Iterable[Union[int, float, datetime.timedelta]], int, float, datetime.timedelta, NoneType] = None,\tcron: Union[Iterable[str], str, NoneType] = None,\trrule: Union[Iterable[str], str, NoneType] = None,\tpaused: Optional[bool] = None,\tschedules: Optional[List[Sequence[Union[prefect.client.schemas.objects.MinimalDeploymentSchedule, dict, prefect.client.schemas.schedules.IntervalSchedule, prefect.client.schemas.schedules.CronSchedule, prefect.client.schemas.schedules.RRuleSchedule, prefect.client.schemas.schedules.NoSchedule]]]] = None,\tschedule: Union[prefect.client.schemas.schedules.IntervalSchedule, prefect.client.schemas.schedules.CronSchedule, prefect.client.schemas.schedules.RRuleSchedule, prefect.client.schemas.schedules.NoSchedule, NoneType] = None,\tis_schedule_active: Optional[bool] = None,\tparameters: Optional[dict] = None,\ttriggers: Optional[List[Union[prefect.events.schemas.deployment_triggers.DeploymentEventTrigger, prefect.events.schemas.deployment_triggers.DeploymentMetricTrigger, prefect.events.schemas.deployment_triggers.DeploymentCompoundTrigger, prefect.events.schemas.deployment_triggers.DeploymentSequenceTrigger, prefect.events.schemas.automations.EventTrigger, prefect.events.schemas.automations.MetricTrigger, prefect.events.schemas.automations.CompoundTrigger, prefect.events.schemas.automations.SequenceTrigger]]] = None,\tdescription: Optional[str] = None,\ttags: Optional[List[str]] = None,\tversion: Optional[str] = None,\tenforce_parameter_schema: bool = False,\twork_pool_name: Optional[str] = None,\twork_queue_name: Optional[str] = None,\tjob_variables: Optional[Dict[str, Any]] = None,\tentrypoint_type: prefect.deployments.runner.EntrypointType = <EntrypointType.FILE_PATH: 'file_path'>) -> prefect.deployments.runner.RunnerDeployment:", "funcdef": "def" }, "prefect.Flow.serve": { "fullname": "prefect.Flow.serve", "modulename": "prefect", "qualname": "Flow.serve", "kind": "function", "doc": "

Creates a deployment for this flow and starts a runner to monitor for scheduled work.

\n\n

Args:\n name: The name to give the created deployment. Defaults to the name of the flow.\n interval: An interval on which to execute the deployment. Accepts a number or a\n timedelta object to create a single schedule. If a number is given, it will be\n interpreted as seconds. Also accepts an iterable of numbers or timedelta to create\n multiple schedules.\n cron: A cron schedule string of when to execute runs of this deployment.\n Also accepts an iterable of cron schedule strings to create multiple schedules.\n rrule: An rrule schedule string of when to execute runs of this deployment.\n Also accepts an iterable of rrule schedule strings to create multiple schedules.\n triggers: A list of triggers that will kick off runs of this deployment.\n paused: Whether or not to set this deployment as paused.\n schedules: A list of schedule objects defining when to execute runs of this deployment.\n Used to define multiple schedules or additional scheduling options like timezone.\n schedule: A schedule object defining when to execute runs of this deployment. Used to\n define additional scheduling options such as timezone.\n is_schedule_active: Whether or not to set the schedule for this deployment as active. If\n not provided when creating a deployment, the schedule will be set as active. If not\n provided when updating a deployment, the schedule's activation will not be changed.\n parameters: A dictionary of default parameter values to pass to runs of this deployment.\n description: A description for the created deployment. Defaults to the flow's\n description if not provided.\n tags: A list of tags to associate with the created deployment for organizational\n purposes.\n version: A version for the created deployment. Defaults to the flow's version.\n enforce_parameter_schema: Whether or not the Prefect API should enforce the\n parameter schema for the created deployment.\n pause_on_shutdown: If True, provided schedule will be paused when the serve function is stopped.\n If False, the schedules will continue running.\n print_starting_message: Whether or not to print the starting message when flow is served.\n limit: The maximum number of runs that can be executed concurrently.\n webserver: Whether or not to start a monitoring webserver for this flow.\n entrypoint_type: Type of entrypoint to use for the deployment. When using a module path\n entrypoint, ensure that the module will be importable in the execution environment.

\n\n

Examples:\n Serve a flow:\n

\n
from prefect import flow\n 
\n @flow\n def my_flow(name):\n print(f"hello {name}")\n
\n if __name__ == "__main__":\n my_flow.serve("example-deployment")\n
\n

\n\n
Serve a flow and run it every hour:\n<div class=\"pdoc-code codehilite\">\n<pre><span></span><code><span class=\"kn\">from</span> <span class=\"nn\">prefect</span> <span class=\"kn\">import</span> <span class=\"n\">flow</span>\n\n<span class=\"nd\">@flow</span>\n<span class=\"k\">def</span> <span class=\"nf\">my_flow</span><span class=\"p\">(</span><span class=\"n\">name</span><span class=\"p\">):</span>\n    <span class=\"nb\">print</span><span class=\"p\">(</span><span class=\"sa\">f</span><span class=\"s2\">&quot;hello </span><span class=\"si\">{</span><span class=\"n\">name</span><span class=\"si\">}</span><span class=\"s2\">&quot;</span><span class=\"p\">)</span>\n\n<span class=\"k\">if</span> <span class=\"vm\">__name__</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;__main__&quot;</span><span class=\"p\">:</span>\n    <span class=\"n\">my_flow</span><span class=\"o\">.</span><span class=\"n\">serve</span><span class=\"p\">(</span><span class=\"s2\">&quot;example-deployment&quot;</span><span class=\"p\">,</span> <span class=\"n\">interval</span><span class=\"o\">=</span><span class=\"mi\">3600</span><span class=\"p\">)</span>\n</code></pre>\n</div>\n
\n", "signature": "(\tself,\tname: Optional[str] = None,\tinterval: Union[Iterable[Union[int, float, datetime.timedelta]], int, float, datetime.timedelta, NoneType] = None,\tcron: Union[Iterable[str], str, NoneType] = None,\trrule: Union[Iterable[str], str, NoneType] = None,\tpaused: Optional[bool] = None,\tschedules: Optional[List[Sequence[Union[prefect.client.schemas.objects.MinimalDeploymentSchedule, dict, prefect.client.schemas.schedules.IntervalSchedule, prefect.client.schemas.schedules.CronSchedule, prefect.client.schemas.schedules.RRuleSchedule, prefect.client.schemas.schedules.NoSchedule]]]] = None,\tschedule: Union[prefect.client.schemas.schedules.IntervalSchedule, prefect.client.schemas.schedules.CronSchedule, prefect.client.schemas.schedules.RRuleSchedule, prefect.client.schemas.schedules.NoSchedule, NoneType] = None,\tis_schedule_active: Optional[bool] = None,\ttriggers: Optional[List[Union[prefect.events.schemas.deployment_triggers.DeploymentEventTrigger, prefect.events.schemas.deployment_triggers.DeploymentMetricTrigger, prefect.events.schemas.deployment_triggers.DeploymentCompoundTrigger, prefect.events.schemas.deployment_triggers.DeploymentSequenceTrigger, prefect.events.schemas.automations.EventTrigger, prefect.events.schemas.automations.MetricTrigger, prefect.events.schemas.automations.CompoundTrigger, prefect.events.schemas.automations.SequenceTrigger]]] = None,\tparameters: Optional[dict] = None,\tdescription: Optional[str] = None,\ttags: Optional[List[str]] = None,\tversion: Optional[str] = None,\tenforce_parameter_schema: bool = False,\tpause_on_shutdown: bool = True,\tprint_starting_message: bool = True,\tlimit: Optional[int] = None,\twebserver: bool = False,\tentrypoint_type: prefect.deployments.runner.EntrypointType = <EntrypointType.FILE_PATH: 'file_path'>):", "funcdef": "async def" }, "prefect.Flow.from_source": { "fullname": "prefect.Flow.from_source", "modulename": "prefect", "qualname": "Flow.from_source", "kind": "function", "doc": "

Loads a flow from a remote source.

\n\n

Args:\n source: Either a URL to a git repository or a storage object.\n entrypoint: The path to a file containing a flow and the name of the flow function in\n the format ./path/to/file.py:flow_func_name.

\n\n

Returns:\n A new Flow instance.

\n\n

Examples:\n Load a flow from a public git repository:\n

\n
from prefect import flow\n    from prefect.runner.storage import GitRepository\n    from prefect.blocks.system import Secret\n 
\n my_flow = flow.from_source(\n source="https://github.com/org/repo.git",\n entrypoint="flows.py:my_flow",\n )\n
\n my_flow()\n
\n

\n\n
Load a flow from a private git repository using an access token stored in a `Secret` block:\n<div class=\"pdoc-code codehilite\">\n<pre><span></span><code><span class=\"kn\">from</span> <span class=\"nn\">prefect</span> <span class=\"kn\">import</span> <span class=\"n\">flow</span>\n<span class=\"kn\">from</span> <span class=\"nn\">prefect.runner.storage</span> <span class=\"kn\">import</span> <span class=\"n\">GitRepository</span>\n<span class=\"kn\">from</span> <span class=\"nn\">prefect.blocks.system</span> <span class=\"kn\">import</span> <span class=\"n\">Secret</span>\n\n<span class=\"n\">my_flow</span> <span class=\"o\">=</span> <span class=\"n\">flow</span><span class=\"o\">.</span><span class=\"n\">from_source</span><span class=\"p\">(</span>\n    <span class=\"n\">source</span><span class=\"o\">=</span><span class=\"n\">GitRepository</span><span class=\"p\">(</span>\n        <span class=\"n\">url</span><span class=\"o\">=</span><span class=\"s2\">&quot;https://github.com/org/repo.git&quot;</span><span class=\"p\">,</span>\n        <span class=\"n\">credentials</span><span class=\"o\">=</span><span class=\"p\">{</span><span class=\"s2\">&quot;access_token&quot;</span><span class=\"p\">:</span> <span class=\"n\">Secret</span><span class=\"o\">.</span><span class=\"n\">load</span><span class=\"p\">(</span><span class=\"s2\">&quot;github-access-token&quot;</span><span class=\"p\">)}</span>\n    <span class=\"p\">),</span>\n    <span class=\"n\">entrypoint</span><span class=\"o\">=</span><span class=\"s2\">&quot;flows.py:my_flow&quot;</span><span class=\"p\">,</span>\n<span class=\"p\">)</span>\n\n<span class=\"n\">my_flow</span><span class=\"p\">()</span>\n</code></pre>\n</div>\n
\n", "signature": "(\tcls: Type[~F],\tsource: Union[str, prefect.runner.storage.RunnerStorage, prefect.filesystems.ReadableDeploymentStorage],\tentrypoint: str) -> ~F:", "funcdef": "def" }, "prefect.Flow.deploy": { "fullname": "prefect.Flow.deploy", "modulename": "prefect", "qualname": "Flow.deploy", "kind": "function", "doc": "

Deploys a flow to run on dynamic infrastructure via a work pool.

\n\n

By default, calling this method will build a Docker image for the flow, push it to a registry,\nand create a deployment via the Prefect API that will run the flow on the given schedule.

\n\n

If you want to use an existing image, you can pass build=False to skip building and pushing\nan image.

\n\n

Args:\n name: The name to give the created deployment.\n work_pool_name: The name of the work pool to use for this deployment. Defaults to\n the value of PREFECT_DEFAULT_WORK_POOL_NAME.\n image: The name of the Docker image to build, including the registry and\n repository. Pass a DeploymentImage instance to customize the Dockerfile used\n and build arguments.\n build: Whether or not to build a new image for the flow. If False, the provided\n image will be used as-is and pulled at runtime.\n push: Whether or not to push the built image to a registry.\n work_queue_name: The name of the work queue to use for this deployment's scheduled runs.\n If not provided, the default work queue for the work pool will be used.\n job_variables: Settings used to override the values specified in the default base job template\n of the chosen work pool. Refer to the base job template of the chosen work pool for\n available settings.\n interval: An interval on which to execute the deployment. Accepts a number or a\n timedelta object to create a single schedule. If a number is given, it will be\n interpreted as seconds. Also accepts an iterable of numbers or timedelta to create\n multiple schedules.\n cron: A cron schedule string of when to execute runs of this deployment.\n Also accepts an iterable of cron schedule strings to create multiple schedules.\n rrule: An rrule schedule string of when to execute runs of this deployment.\n Also accepts an iterable of rrule schedule strings to create multiple schedules.\n triggers: A list of triggers that will kick off runs of this deployment.\n paused: Whether or not to set this deployment as paused.\n schedules: A list of schedule objects defining when to execute runs of this deployment.\n Used to define multiple schedules or additional scheduling options like timezone.\n schedule: A schedule object defining when to execute runs of this deployment. Used to\n define additional scheduling options like timezone.\n is_schedule_active: Whether or not to set the schedule for this deployment as active. If\n not provided when creating a deployment, the schedule will be set as active. If not\n provided when updating a deployment, the schedule's activation will not be changed.\n parameters: A dictionary of default parameter values to pass to runs of this deployment.\n description: A description for the created deployment. Defaults to the flow's\n description if not provided.\n tags: A list of tags to associate with the created deployment for organizational\n purposes.\n version: A version for the created deployment. Defaults to the flow's version.\n enforce_parameter_schema: Whether or not the Prefect API should enforce the\n parameter schema for the created deployment.\n entrypoint_type: Type of entrypoint to use for the deployment. When using a module path\n entrypoint, ensure that the module will be importable in the execution environment.\n print_next_steps_message: Whether or not to print a message with next steps\n after deploying the deployments.\n ignore_warnings: Whether or not to ignore warnings about the work pool type.

\n\n

Returns:\n The ID of the created/updated deployment.

\n\n

Examples:\n Deploy a local flow to a work pool:\n

\n
from prefect import flow\n 
\n @flow\n def my_flow(name):\n print(f"hello {name}")\n
\n if __name__ == "__main__":\n my_flow.deploy(\n "example-deployment",\n work_pool_name="my-work-pool",\n image="my-repository/my-image:dev",\n )\n
\n

\n\n
Deploy a remotely stored flow to a work pool:\n<div class=\"pdoc-code codehilite\">\n<pre><span></span><code><span class=\"kn\">from</span> <span class=\"nn\">prefect</span> <span class=\"kn\">import</span> <span class=\"n\">flow</span>\n\n<span class=\"k\">if</span> <span class=\"vm\">__name__</span> <span class=\"o\">==</span> <span class=\"s2\">&quot;__main__&quot;</span><span class=\"p\">:</span>\n    <span class=\"n\">flow</span><span class=\"o\">.</span><span class=\"n\">from_source</span><span class=\"p\">(</span>\n        <span class=\"n\">source</span><span class=\"o\">=</span><span class=\"s2\">&quot;https://github.com/org/repo.git&quot;</span><span class=\"p\">,</span>\n        <span class=\"n\">entrypoint</span><span class=\"o\">=</span><span class=\"s2\">&quot;flows.py:my_flow&quot;</span><span class=\"p\">,</span>\n    <span class=\"p\">)</span><span class=\"o\">.</span><span class=\"n\">deploy</span><span class=\"p\">(</span>\n        <span class=\"s2\">&quot;example-deployment&quot;</span><span class=\"p\">,</span>\n        <span class=\"n\">work_pool_name</span><span class=\"o\">=</span><span class=\"s2\">&quot;my-work-pool&quot;</span><span class=\"p\">,</span>\n        <span class=\"n\">image</span><span class=\"o\">=</span><span class=\"s2\">&quot;my-repository/my-image:dev&quot;</span><span class=\"p\">,</span>\n    <span class=\"p\">)</span>\n</code></pre>\n</div>\n
\n", "signature": "(\tself,\tname: str,\twork_pool_name: Optional[str] = None,\timage: Union[str, prefect.deployments.runner.DeploymentImage, NoneType] = None,\tbuild: bool = True,\tpush: bool = True,\twork_queue_name: Optional[str] = None,\tjob_variables: Optional[dict] = None,\tinterval: Union[int, float, datetime.timedelta, NoneType] = None,\tcron: Optional[str] = None,\trrule: Optional[str] = None,\tpaused: Optional[bool] = None,\tschedules: Optional[List[prefect.client.schemas.objects.MinimalDeploymentSchedule]] = None,\tschedule: Union[prefect.client.schemas.schedules.IntervalSchedule, prefect.client.schemas.schedules.CronSchedule, prefect.client.schemas.schedules.RRuleSchedule, prefect.client.schemas.schedules.NoSchedule, NoneType] = None,\tis_schedule_active: Optional[bool] = None,\ttriggers: Optional[List[Union[prefect.events.schemas.deployment_triggers.DeploymentEventTrigger, prefect.events.schemas.deployment_triggers.DeploymentMetricTrigger, prefect.events.schemas.deployment_triggers.DeploymentCompoundTrigger, prefect.events.schemas.deployment_triggers.DeploymentSequenceTrigger, prefect.events.schemas.automations.EventTrigger, prefect.events.schemas.automations.MetricTrigger, prefect.events.schemas.automations.CompoundTrigger, prefect.events.schemas.automations.SequenceTrigger]]] = None,\tparameters: Optional[dict] = None,\tdescription: Optional[str] = None,\ttags: Optional[List[str]] = None,\tversion: Optional[str] = None,\tenforce_parameter_schema: bool = False,\tentrypoint_type: prefect.deployments.runner.EntrypointType = <EntrypointType.FILE_PATH: 'file_path'>,\tprint_next_steps: bool = True,\tignore_warnings: bool = False) -> uuid.UUID:", "funcdef": "async def" }, "prefect.Flow.visualize": { "fullname": "prefect.Flow.visualize", "modulename": "prefect", "qualname": "Flow.visualize", "kind": "function", "doc": "

Generates a graphviz object representing the current flow. In IPython notebooks,\nit's rendered inline, otherwise in a new window as a PNG.

\n\n

Raises:\n - ImportError: If graphviz isn't installed.\n - GraphvizExecutableNotFoundError: If the dot executable isn't found.\n - FlowVisualizationError: If the flow can't be visualized for any other reason.
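
\n\n

Example (a minimal sketch; assumes the graphviz package and its dot executable are installed):\n\n>>> from prefect import flow\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     pass\n>>>\n>>> my_flow.visualize()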

\n", "signature": "(self, *args, **kwargs):", "funcdef": "async def" }, "prefect.get_client": { "fullname": "prefect.get_client", "modulename": "prefect", "qualname": "get_client", "kind": "function", "doc": "

Retrieve a HTTP client for communicating with the Prefect REST API.

\n\n

The client must be context managed; for example:

\n\n
\n
async with get_client() as client:\n    await client.hello()\n
\n
\n\n

To return a synchronous client, pass sync_client=True:

\n\n
\n
with get_client(sync_client=True) as client:\n    client.hello()\n
\n
\n", "signature": "(\thttpx_settings: Optional[Dict[str, Any]] = None,\tsync_client: bool = False) -> Union[prefect.client.orchestration.PrefectClient, prefect.client.orchestration.SyncPrefectClient]:", "funcdef": "def" }, "prefect.get_run_logger": { "fullname": "prefect.get_run_logger", "modulename": "prefect", "qualname": "get_run_logger", "kind": "function", "doc": "

Get a Prefect logger for the current task run or flow run.

\n\n

The logger will be named either prefect.task_runs or prefect.flow_runs.\nContextual data about the run will be attached to the log records.

\n\n

These loggers are connected to the APILogHandler by default to send log records to\nthe API.

\n\n

Arguments:\n context: A specific context may be provided as an override. By default, the\n context is inferred from global state and this should not be needed.\n **kwargs: Additional keyword arguments will be attached to the log records in\n addition to the run metadata

\n\n

Raises:\n RuntimeError: If no context can be found
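
\n\n

Example (a minimal sketch; assumes the logger is requested inside a flow run):\n\n>>> from prefect import flow, get_run_logger\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     logger = get_run_logger()\n>>>     logger.info(\"Hello from my flow!\")\n>>>\n>>> my_flow()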

\n", "signature": "(\tcontext: prefect.context.RunContext = None,\t**kwargs: str) -> Union[logging.Logger, logging.LoggerAdapter]:", "funcdef": "def" }, "prefect.Manifest": { "fullname": "prefect.Manifest", "modulename": "prefect", "qualname": "Manifest", "kind": "class", "doc": "

A JSON representation of a flow.
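
\n\n

Example (a sketch; the field values are hypothetical, and parameter_schema from prefect.utilities.callables builds the required ParameterSchema):\n\n>>> from prefect import Manifest\n>>> from prefect.utilities.callables import parameter_schema\n>>>\n>>> def my_fn(x: int) -> int:\n>>>     return x\n>>>\n>>> manifest = Manifest(\n>>>     flow_name=\"my-flow\",\n>>>     import_path=\"flows.py:my_fn\",\n>>>     parameter_openapi_schema=parameter_schema(my_fn),\n>>> )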

\n", "bases": "prefect._internal.pydantic._compat.BaseModel" }, "prefect.Manifest.model_config": { "fullname": "prefect.Manifest.model_config", "modulename": "prefect", "qualname": "Manifest.model_config", "kind": "variable", "doc": "

\n", "annotation": ": ClassVar[pydantic.v1.config.ConfigDict]" }, "prefect.Manifest.flow_name": { "fullname": "prefect.Manifest.flow_name", "modulename": "prefect", "qualname": "Manifest.flow_name", "kind": "variable", "doc": "

\n", "annotation": ": str" }, "prefect.Manifest.import_path": { "fullname": "prefect.Manifest.import_path", "modulename": "prefect", "qualname": "Manifest.import_path", "kind": "variable", "doc": "

\n", "annotation": ": str" }, "prefect.Manifest.parameter_openapi_schema": { "fullname": "prefect.Manifest.parameter_openapi_schema", "modulename": "prefect", "qualname": "Manifest.parameter_openapi_schema", "kind": "variable", "doc": "

\n", "annotation": ": prefect.utilities.callables.ParameterSchema" }, "prefect.Manifest.model_fields": { "fullname": "prefect.Manifest.model_fields", "modulename": "prefect", "qualname": "Manifest.model_fields", "kind": "variable", "doc": "

\n", "annotation": ": ClassVar[Dict[str, pydantic.v1.fields.FieldInfo]]", "default_value": "{'flow_name': ModelField(name='flow_name', type=str, required=True), 'import_path': ModelField(name='import_path', type=str, required=True), 'parameter_openapi_schema': ModelField(name='parameter_openapi_schema', type=ParameterSchema, required=True)}" }, "prefect.State": { "fullname": "prefect.State", "modulename": "prefect", "qualname": "State", "kind": "class", "doc": "

The state of a run.

\n", "bases": "prefect._internal.schemas.bases.ObjectBaseModel, typing.Generic[~R]" }, "prefect.State.type": { "fullname": "prefect.State.type", "modulename": "prefect", "qualname": "State.type", "kind": "variable", "doc": "

\n", "annotation": ": prefect.client.schemas.objects.StateType" }, "prefect.State.name": { "fullname": "prefect.State.name", "modulename": "prefect", "qualname": "State.name", "kind": "variable", "doc": "

\n", "annotation": ": Optional[str]" }, "prefect.State.timestamp": { "fullname": "prefect.State.timestamp", "modulename": "prefect", "qualname": "State.timestamp", "kind": "variable", "doc": "

\n", "annotation": ": pendulum.datetime.DateTime" }, "prefect.State.message": { "fullname": "prefect.State.message", "modulename": "prefect", "qualname": "State.message", "kind": "variable", "doc": "

\n", "annotation": ": Optional[str]" }, "prefect.State.state_details": { "fullname": "prefect.State.state_details", "modulename": "prefect", "qualname": "State.state_details", "kind": "variable", "doc": "

\n", "annotation": ": prefect.client.schemas.objects.StateDetails" }, "prefect.State.data": { "fullname": "prefect.State.data", "modulename": "prefect", "qualname": "State.data", "kind": "variable", "doc": "

\n", "annotation": ": Union[prefect.results.BaseResult[~R], prefect.deprecated.data_documents.DataDocument[~R], Any]" }, "prefect.State.result": { "fullname": "prefect.State.result", "modulename": "prefect", "qualname": "State.result", "kind": "function", "doc": "

Retrieve the result attached to this state.

\n\n

Args:\n raise_on_failure: a boolean specifying whether to raise an exception\n if the state is of type FAILED and the underlying data is an exception\n fetch: a boolean specifying whether to resolve references to persisted\n results into data. For synchronous users, this defaults to True.\n For asynchronous users, this defaults to False for backwards\n compatibility.

\n\n

Raises:\n TypeError: If the state is failed but the result is not an exception.

\n\n

Returns:\n The result of the run

\n\n

Examples:

\n\n
\n
\n
\n

from prefect import flow, task\n @task\n def my_task(x):\n return x

\n\n
Get the result from a task future in a flow\n\n>>> @flow\n>>> def my_flow():\n>>>     future = my_task(\"hello\")\n>>>     state = future.wait()\n>>>     result = state.result()\n>>>     print(result)\n>>> my_flow()\nhello\n\nGet the result from a flow state\n\n>>> @flow\n>>> def my_flow():\n>>>     return \"hello\"\n>>> my_flow(return_state=True).result()\nhello\n\nGet the result from a failed state\n\n>>> @flow\n>>> def my_flow():\n>>>     raise ValueError(\"oh no!\")\n>>> state = my_flow(return_state=True)  # Error is wrapped in FAILED state\n>>> state.result()  # Raises `ValueError`\n\nGet the result from a failed state without erroring\n\n>>> @flow\n>>> def my_flow():\n>>>     raise ValueError(\"oh no!\")\n>>> state = my_flow(return_state=True)\n>>> result = state.result(raise_on_failure=False)\n>>> print(result)\nValueError(\"oh no!\")\n\n\nGet the result from a flow state in an async context\n\n>>> @flow\n>>> async def my_flow():\n>>>     return \"hello\"\n>>> state = await my_flow(return_state=True)\n>>> await state.result()\nhello\n
\n
\n
\n
\n", "signature": "(\tself,\traise_on_failure: bool = True,\tfetch: Optional[bool] = None) -> Union[~R, Exception]:", "funcdef": "def" }, "prefect.State.to_state_create": { "fullname": "prefect.State.to_state_create", "modulename": "prefect", "qualname": "State.to_state_create", "kind": "function", "doc": "

Convert this state to a StateCreate type which can be used to set the state of\na run in the API.

\n\n

This method will drop this state's data if it is not a result type. Only\nresults should be sent to the API. Other data is only available locally.
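
\n\n

Example (a small sketch using a terminal state from prefect.states):\n\n>>> from prefect.states import Completed\n>>>\n>>> state = Completed()\n>>> state_create = state.to_state_create()  # suitable for sending to the API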

\n", "signature": "(self):", "funcdef": "def" }, "prefect.State.default_name_from_type": { "fullname": "prefect.State.default_name_from_type", "modulename": "prefect", "qualname": "State.default_name_from_type", "kind": "function", "doc": "

\n", "signature": "(cls, v, *, values, **kwargs):", "funcdef": "def" }, "prefect.State.default_scheduled_start_time": { "fullname": "prefect.State.default_scheduled_start_time", "modulename": "prefect", "qualname": "State.default_scheduled_start_time", "kind": "function", "doc": "

TODO: This should throw an error instead of setting a default but is out of\n scope for https://github.com/PrefectHQ/orion/pull/174/ and can be rolled\n into work refactoring state initialization

\n", "signature": "(cls, values):", "funcdef": "def" }, "prefect.State.is_scheduled": { "fullname": "prefect.State.is_scheduled", "modulename": "prefect", "qualname": "State.is_scheduled", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_pending": { "fullname": "prefect.State.is_pending", "modulename": "prefect", "qualname": "State.is_pending", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_running": { "fullname": "prefect.State.is_running", "modulename": "prefect", "qualname": "State.is_running", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_completed": { "fullname": "prefect.State.is_completed", "modulename": "prefect", "qualname": "State.is_completed", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_failed": { "fullname": "prefect.State.is_failed", "modulename": "prefect", "qualname": "State.is_failed", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_crashed": { "fullname": "prefect.State.is_crashed", "modulename": "prefect", "qualname": "State.is_crashed", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_cancelled": { "fullname": "prefect.State.is_cancelled", "modulename": "prefect", "qualname": "State.is_cancelled", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_cancelling": { "fullname": "prefect.State.is_cancelling", "modulename": "prefect", "qualname": "State.is_cancelling", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_final": { "fullname": "prefect.State.is_final", "modulename": "prefect", "qualname": "State.is_final", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.is_paused": { "fullname": "prefect.State.is_paused", "modulename": "prefect", "qualname": "State.is_paused", "kind": "function", "doc": "

\n", "signature": "(self) -> bool:", "funcdef": "def" }, "prefect.State.copy": { "fullname": "prefect.State.copy", "modulename": "prefect", "qualname": "State.copy", "kind": "function", "doc": "

Copying API models should return an object that could be inserted into the\ndatabase again. The 'timestamp' is reset using the default factory.

\n", "signature": "(\tself,\t*,\tupdate: Optional[Dict[str, Any]] = None,\treset_fields: bool = False,\t**kwargs):", "funcdef": "def" }, "prefect.tags": { "fullname": "prefect.tags", "modulename": "prefect", "qualname": "tags", "kind": "function", "doc": "

Context manager to add tags to flow and task run calls.

\n\n

Tags are always combined with any existing tags.

\n\n

Yields:\n The current set of tags

\n\n

Examples:

\n\n
\n
\n
\n

from prefect import tags, task, flow\n @task\n def my_task():\n pass

\n\n
Run a task with tags\n\n>>> @flow\n>>> def my_flow():\n>>>     with tags(\"a\", \"b\"):\n>>>         my_task()  # has tags: a, b\n\nRun a flow with tags\n\n>>> @flow\n>>> def my_flow():\n>>>     pass\n>>> with tags(\"a\", \"b\"):\n>>>     my_flow()  # has tags: a, b\n\nRun a task with nested tag contexts\n\n>>> @flow\n>>> def my_flow():\n>>>     with tags(\"a\", \"b\"):\n>>>         with tags(\"c\", \"d\"):\n>>>             my_task()  # has tags: a, b, c, d\n>>>         my_task()  # has tags: a, b\n\nInspect the current tags\n\n>>> @flow\n>>> def my_flow():\n>>>     with tags(\"c\", \"d\"):\n>>>         with tags(\"e\", \"f\") as current_tags:\n>>>              print(current_tags)\n>>> with tags(\"a\", \"b\"):\n>>>     my_flow()\n{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"}\n
\n
\n
\n
\n", "signature": "(*new_tags: str) -> Generator[Set[str], NoneType, NoneType]:", "funcdef": "def" }, "prefect.task": { "fullname": "prefect.task", "modulename": "prefect", "qualname": "task", "kind": "function", "doc": "

Decorator to designate a function as a task in a Prefect workflow.

\n\n

This decorator may be used for asynchronous or synchronous functions.

\n\n

Args:\n name: An optional name for the task; if not provided, the name will be inferred\n from the given function.\n description: An optional string description for the task.\n tags: An optional set of tags to be associated with runs of this task. These\n tags are combined with any tags defined by a prefect.tags context at\n task runtime.\n version: An optional string specifying the version of this task definition\n cache_key_fn: An optional callable that, given the task run context and call\n parameters, generates a string key; if the key matches a previous completed\n state, that state result will be restored instead of running the task again.\n cache_expiration: An optional amount of time indicating how long cached states\n for this task should be restorable; if not provided, cached states will\n never expire.\n task_run_name: An optional name to distinguish runs of this task; this name can be provided\n as a string template with the task's keyword arguments as variables,\n or a function that returns a string.\n retries: An optional number of times to retry on task run failure\n retry_delay_seconds: Optionally configures how long to wait before retrying the\n task after failure. This is only applicable if retries is nonzero. This\n setting can either be a number of seconds, a list of retry delays, or a\n callable that, given the total number of retries, generates a list of retry\n delays. If a number of seconds, that delay will be applied to all retries.\n If a list, each retry will wait for the corresponding delay before retrying.\n When passing a callable or a list, the number of configured retry delays\n cannot exceed 50.\n retry_jitter_factor: An optional factor that defines the factor to which a retry\n can be jittered in order to avoid a \"thundering herd\".\n persist_result: An optional toggle indicating whether the result of this task\n should be persisted to result storage. Defaults to None, which indicates\n that Prefect should choose whether the result should be persisted depending on\n the features being used.\n result_storage: An optional block to use to persist the result of this task.\n Defaults to the value set in the flow the task is called in.\n result_storage_key: An optional key to store the result in storage at when persisted.\n Defaults to a unique identifier.\n result_serializer: An optional serializer to use to serialize the result of this\n task for persistence. Defaults to the value set in the flow the task is\n called in.\n timeout_seconds: An optional number of seconds indicating a maximum runtime for\n the task. If the task exceeds this runtime, it will be marked as failed.\n log_prints: If set, print statements in the task will be redirected to the\n Prefect logger for the task run. Defaults to None, which indicates\n that the value from the flow should be used.\n refresh_cache: If set, cached results for the cache key are not used.\n Defaults to None, which indicates that a cached result from a previous\n execution with matching cache key is used.\n on_failure: An optional list of callables to run when the task enters a failed state.\n on_completion: An optional list of callables to run when the task enters a completed state.\n retry_condition_fn: An optional callable run when a task run returns a Failed state. Should\n return True if the task should continue to its retry policy (e.g. retries=3), and False if the task\n should end as failed. 
Defaults to None, indicating the task should always continue\n to its retry policy.\n viz_return_value: An optional value to return when the task dependency tree is visualized.

\n\n

Returns:\n A callable Task object which, when called, will submit the task for execution.

\n\n

Examples:\n Define a simple task

\n\n
>>> @task\n>>> def add(x, y):\n>>>     return x + y\n\nDefine an async task\n\n>>> @task\n>>> async def add(x, y):\n>>>     return x + y\n\nDefine a task with tags and a description\n\n>>> @task(tags={\"a\", \"b\"}, description=\"This task is empty but it's my first!\")\n>>> def my_task():\n>>>     pass\n\nDefine a task with a custom name\n\n>>> @task(name=\"The Ultimate Task\")\n>>> def my_task():\n>>>     pass\n\nDefine a task that retries 3 times with a 5 second delay between attempts\n\n>>> from random import randint\n>>>\n>>> @task(retries=3, retry_delay_seconds=5)\n>>> def my_task():\n>>>     x = randint(0, 5)\n>>>     if x >= 3:  # Make a task that fails sometimes\n>>>         raise ValueError(\"Retry me please!\")\n>>>     return x\n\nDefine a task that is cached for a day based on its inputs\n\n>>> from prefect.tasks import task_input_hash\n>>> from datetime import timedelta\n>>>\n>>> @task(cache_key_fn=task_input_hash, cache_expiration=timedelta(days=1))\n>>> def my_task():\n>>>     return \"hello\"\n
\n", "signature": "(\t__fn=None,\t*,\tname: str = None,\tdescription: str = None,\ttags: Iterable[str] = None,\tversion: str = None,\tcache_key_fn: Callable[[prefect.context.TaskRunContext, Dict[str, Any]], Optional[str]] = None,\tcache_expiration: datetime.timedelta = None,\ttask_run_name: Union[Callable[[], str], str, NoneType] = None,\tretries: int = None,\tretry_delay_seconds: Union[float, int, List[float], Callable[[int], List[float]]] = None,\tretry_jitter_factor: Optional[float] = None,\tpersist_result: Optional[bool] = None,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = None,\tresult_storage_key: Optional[str] = None,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = None,\tcache_result_in_memory: bool = True,\ttimeout_seconds: Union[int, float] = None,\tlog_prints: Optional[bool] = None,\trefresh_cache: Optional[bool] = None,\ton_completion: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_failure: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], NoneType]]] = None,\tretry_condition_fn: Optional[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], bool]] = None,\tviz_return_value: Any = None):", "funcdef": "def" }, "prefect.Task": { "fullname": "prefect.Task", "modulename": "prefect", "qualname": "Task", "kind": "class", "doc": "

A Prefect task definition.

\n\n

!!! note\n We recommend using [the @task decorator][prefect.tasks.task] for most use-cases.

\n\n

Wraps a function with an entrypoint to the Prefect engine. Calling this class within a flow function\ncreates a new task run.

\n\n

To preserve the input and output types, we use the generic type variables P and R for \"Parameters\" and\n\"Returns\" respectively.

\n\n

Args:\n fn: The function defining the task.\n name: An optional name for the task; if not provided, the name will be inferred\n from the given function.\n description: An optional string description for the task.\n tags: An optional set of tags to be associated with runs of this task. These\n tags are combined with any tags defined by a prefect.tags context at\n task runtime.\n version: An optional string specifying the version of this task definition\n cache_key_fn: An optional callable that, given the task run context and call\n parameters, generates a string key; if the key matches a previous completed\n state, that state result will be restored instead of running the task again.\n cache_expiration: An optional amount of time indicating how long cached states\n for this task should be restorable; if not provided, cached states will\n never expire.\n task_run_name: An optional name to distinguish runs of this task; this name can be provided\n as a string template with the task's keyword arguments as variables,\n or a function that returns a string.\n retries: An optional number of times to retry on task run failure.\n retry_delay_seconds: Optionally configures how long to wait before retrying the\n task after failure. This is only applicable if retries is nonzero. This\n setting can either be a number of seconds, a list of retry delays, or a\n callable that, given the total number of retries, generates a list of retry\n delays. If a number of seconds, that delay will be applied to all retries.\n If a list, each retry will wait for the corresponding delay before retrying.\n When passing a callable or a list, the number of configured retry delays\n cannot exceed 50.\n retry_jitter_factor: An optional factor that defines the factor to which a retry\n can be jittered in order to avoid a \"thundering herd\".\n persist_result: An optional toggle indicating whether the result of this task\n should be persisted to result storage. Defaults to None, which indicates\n that Prefect should choose whether the result should be persisted depending on\n the features being used.\n result_storage: An optional block to use to persist the result of this task.\n Defaults to the value set in the flow the task is called in.\n result_storage_key: An optional key to store the result in storage at when persisted.\n Defaults to a unique identifier.\n result_serializer: An optional serializer to use to serialize the result of this\n task for persistence. Defaults to the value set in the flow the task is\n called in.\n timeout_seconds: An optional number of seconds indicating a maximum runtime for\n the task. If the task exceeds this runtime, it will be marked as failed.\n log_prints: If set, print statements in the task will be redirected to the\n Prefect logger for the task run. Defaults to None, which indicates\n that the value from the flow should be used.\n refresh_cache: If set, cached results for the cache key are not used.\n Defaults to None, which indicates that a cached result from a previous\n execution with matching cache key is used.\n on_failure: An optional list of callables to run when the task enters a failed state.\n on_completion: An optional list of callables to run when the task enters a completed state.\n retry_condition_fn: An optional callable run when a task run returns a Failed state. Should\n return True if the task should continue to its retry policy (e.g. retries=3), and False if the task\n should end as failed. 
Defaults to None, indicating the task should always continue\n to its retry policy.\n viz_return_value: An optional value to return when the task dependency tree is visualized.
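
\n\n

Example (a sketch of direct construction; the @task decorator remains the recommended interface):\n\n>>> from prefect import flow\n>>> from prefect.tasks import Task\n>>>\n>>> def double(x):\n>>>     return x * 2\n>>>\n>>> double_task = Task(fn=double, name=\"double\")\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     return double_task(2)  # creates a task run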

\n", "bases": "typing.Generic[~P, ~R]" }, "prefect.Task.__init__": { "fullname": "prefect.Task.__init__", "modulename": "prefect", "qualname": "Task.__init__", "kind": "function", "doc": "

\n", "signature": "(\tfn: Callable[~P, ~R],\tname: Optional[str] = None,\tdescription: Optional[str] = None,\ttags: Optional[Iterable[str]] = None,\tversion: Optional[str] = None,\tcache_key_fn: Optional[Callable[[prefect.context.TaskRunContext, Dict[str, Any]], Optional[str]]] = None,\tcache_expiration: Optional[datetime.timedelta] = None,\ttask_run_name: Union[Callable[[], str], str, NoneType] = None,\tretries: Optional[int] = None,\tretry_delay_seconds: Union[float, int, List[float], Callable[[int], List[float]], NoneType] = None,\tretry_jitter_factor: Optional[float] = None,\tpersist_result: Optional[bool] = None,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = None,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = None,\tresult_storage_key: Optional[str] = None,\tcache_result_in_memory: bool = True,\ttimeout_seconds: Union[int, float, NoneType] = None,\tlog_prints: Optional[bool] = False,\trefresh_cache: Optional[bool] = None,\ton_completion: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], NoneType]]] = None,\ton_failure: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], NoneType]]] = None,\tretry_condition_fn: Optional[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], bool]] = None,\tviz_return_value: Optional[Any] = None)" }, "prefect.Task.description": { "fullname": "prefect.Task.description", "modulename": "prefect", "qualname": "Task.description", "kind": "variable", "doc": "

\n" }, "prefect.Task.fn": { "fullname": "prefect.Task.fn", "modulename": "prefect", "qualname": "Task.fn", "kind": "variable", "doc": "

\n" }, "prefect.Task.isasync": { "fullname": "prefect.Task.isasync", "modulename": "prefect", "qualname": "Task.isasync", "kind": "variable", "doc": "

\n" }, "prefect.Task.task_run_name": { "fullname": "prefect.Task.task_run_name", "modulename": "prefect", "qualname": "Task.task_run_name", "kind": "variable", "doc": "

\n" }, "prefect.Task.version": { "fullname": "prefect.Task.version", "modulename": "prefect", "qualname": "Task.version", "kind": "variable", "doc": "

\n" }, "prefect.Task.log_prints": { "fullname": "prefect.Task.log_prints", "modulename": "prefect", "qualname": "Task.log_prints", "kind": "variable", "doc": "

\n" }, "prefect.Task.tags": { "fullname": "prefect.Task.tags", "modulename": "prefect", "qualname": "Task.tags", "kind": "variable", "doc": "

\n" }, "prefect.Task.cache_key_fn": { "fullname": "prefect.Task.cache_key_fn", "modulename": "prefect", "qualname": "Task.cache_key_fn", "kind": "variable", "doc": "

\n" }, "prefect.Task.cache_expiration": { "fullname": "prefect.Task.cache_expiration", "modulename": "prefect", "qualname": "Task.cache_expiration", "kind": "variable", "doc": "

\n" }, "prefect.Task.refresh_cache": { "fullname": "prefect.Task.refresh_cache", "modulename": "prefect", "qualname": "Task.refresh_cache", "kind": "variable", "doc": "

\n" }, "prefect.Task.retries": { "fullname": "prefect.Task.retries", "modulename": "prefect", "qualname": "Task.retries", "kind": "variable", "doc": "

\n" }, "prefect.Task.retry_jitter_factor": { "fullname": "prefect.Task.retry_jitter_factor", "modulename": "prefect", "qualname": "Task.retry_jitter_factor", "kind": "variable", "doc": "

\n" }, "prefect.Task.persist_result": { "fullname": "prefect.Task.persist_result", "modulename": "prefect", "qualname": "Task.persist_result", "kind": "variable", "doc": "

\n" }, "prefect.Task.result_storage": { "fullname": "prefect.Task.result_storage", "modulename": "prefect", "qualname": "Task.result_storage", "kind": "variable", "doc": "

\n" }, "prefect.Task.result_serializer": { "fullname": "prefect.Task.result_serializer", "modulename": "prefect", "qualname": "Task.result_serializer", "kind": "variable", "doc": "

\n" }, "prefect.Task.result_storage_key": { "fullname": "prefect.Task.result_storage_key", "modulename": "prefect", "qualname": "Task.result_storage_key", "kind": "variable", "doc": "

\n" }, "prefect.Task.cache_result_in_memory": { "fullname": "prefect.Task.cache_result_in_memory", "modulename": "prefect", "qualname": "Task.cache_result_in_memory", "kind": "variable", "doc": "

\n" }, "prefect.Task.timeout_seconds": { "fullname": "prefect.Task.timeout_seconds", "modulename": "prefect", "qualname": "Task.timeout_seconds", "kind": "variable", "doc": "

\n" }, "prefect.Task.on_completion": { "fullname": "prefect.Task.on_completion", "modulename": "prefect", "qualname": "Task.on_completion", "kind": "variable", "doc": "

\n" }, "prefect.Task.on_failure": { "fullname": "prefect.Task.on_failure", "modulename": "prefect", "qualname": "Task.on_failure", "kind": "variable", "doc": "

\n" }, "prefect.Task.retry_condition_fn": { "fullname": "prefect.Task.retry_condition_fn", "modulename": "prefect", "qualname": "Task.retry_condition_fn", "kind": "variable", "doc": "

\n" }, "prefect.Task.viz_return_value": { "fullname": "prefect.Task.viz_return_value", "modulename": "prefect", "qualname": "Task.viz_return_value", "kind": "variable", "doc": "

\n" }, "prefect.Task.with_options": { "fullname": "prefect.Task.with_options", "modulename": "prefect", "qualname": "Task.with_options", "kind": "function", "doc": "

Create a new task from the current object, updating provided options.

\n\n

Args:\n name: A new name for the task.\n description: A new description for the task.\n tags: A new set of tags for the task. If given, existing tags are ignored,\n not merged.\n cache_key_fn: A new cache key function for the task.\n cache_expiration: A new cache expiration time for the task.\n task_run_name: An optional name to distinguish runs of this task; this name can be provided\n as a string template with the task's keyword arguments as variables,\n or a function that returns a string.\n retries: A new number of times to retry on task run failure.\n retry_delay_seconds: Optionally configures how long to wait before retrying\n the task after failure. This is only applicable if retries is nonzero.\n This setting can either be a number of seconds, a list of retry delays,\n or a callable that, given the total number of retries, generates a list\n of retry delays. If a number of seconds, that delay will be applied to\n all retries. If a list, each retry will wait for the corresponding delay\n before retrying. When passing a callable or a list, the number of\n configured retry delays cannot exceed 50.\n retry_jitter_factor: An optional factor that defines the factor to which a\n retry can be jittered in order to avoid a \"thundering herd\".\n persist_result: A new option for enabling or disabling result persistence.\n result_storage: A new storage type to use for results.\n result_serializer: A new serializer to use for results.\n result_storage_key: A new key for the persisted result to be stored at.\n timeout_seconds: A new maximum time for the task to complete in seconds.\n log_prints: A new option for enabling or disabling redirection of print statements.\n refresh_cache: A new option for enabling or disabling cache refresh.\n on_completion: A new list of callables to run when the task enters a completed state.\n on_failure: A new list of callables to run when the task enters a failed state.\n retry_condition_fn: An optional callable run when a task run returns a Failed state.\n Should return True if the task should continue to its retry policy, and False\n if the task should end as failed. Defaults to None, indicating the task should\n always continue to its retry policy.\n viz_return_value: An optional value to return when the task dependency tree is visualized.

\n\n

Returns:\n A new Task instance.

\n\n

Examples:

\n\n
Create a new task from an existing task and update the name\n\n>>> @task(name=\"My task\")\n>>> def my_task():\n>>>     return 1\n>>>\n>>> new_task = my_task.with_options(name=\"My new task\")\n\nCreate a new task from an existing task and update the retry settings\n\n>>> from random import randint\n>>>\n>>> @task(retries=1, retry_delay_seconds=5)\n>>> def my_task():\n>>>     x = randint(0, 5)\n>>>     if x >= 3:  # Make a task that fails sometimes\n>>>         raise ValueError(\"Retry me please!\")\n>>>     return x\n>>>\n>>> new_task = my_task.with_options(retries=5, retry_delay_seconds=2)\n\nUse a task with updated options within a flow\n\n>>> @task(name=\"My task\")\n>>> def my_task():\n>>>     return 1\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     new_task = my_task.with_options(name=\"My new task\")\n>>>     new_task()\n
\n", "signature": "(\tself,\t*,\tname: str = None,\tdescription: str = None,\ttags: Iterable[str] = None,\tcache_key_fn: Callable[[prefect.context.TaskRunContext, Dict[str, Any]], Optional[str]] = None,\ttask_run_name: Union[Callable[[], str], str, NoneType] = None,\tcache_expiration: datetime.timedelta = None,\tretries: Optional[int] = <class 'prefect.utilities.annotations.NotSet'>,\tretry_delay_seconds: Union[float, int, List[float], Callable[[int], List[float]]] = <class 'prefect.utilities.annotations.NotSet'>,\tretry_jitter_factor: Optional[float] = <class 'prefect.utilities.annotations.NotSet'>,\tpersist_result: Optional[bool] = <class 'prefect.utilities.annotations.NotSet'>,\tresult_storage: Union[prefect.filesystems.WritableFileSystem, str, NoneType] = <class 'prefect.utilities.annotations.NotSet'>,\tresult_serializer: Union[prefect.serializers.Serializer, str, NoneType] = <class 'prefect.utilities.annotations.NotSet'>,\tresult_storage_key: Optional[str] = <class 'prefect.utilities.annotations.NotSet'>,\tcache_result_in_memory: Optional[bool] = None,\ttimeout_seconds: Union[int, float] = None,\tlog_prints: Optional[bool] = <class 'prefect.utilities.annotations.NotSet'>,\trefresh_cache: Optional[bool] = <class 'prefect.utilities.annotations.NotSet'>,\ton_completion: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], Optional[Awaitable[NoneType]]]]] = None,\ton_failure: Optional[List[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], Optional[Awaitable[NoneType]]]]] = None,\tretry_condition_fn: Optional[Callable[[prefect.tasks.Task, prefect.client.schemas.objects.TaskRun, prefect.client.schemas.objects.State], bool]] = None,\tviz_return_value: Optional[Any] = None):", "funcdef": "def" }, "prefect.Task.create_run": { "fullname": "prefect.Task.create_run", "modulename": "prefect", "qualname": "Task.create_run", "kind": "function", "doc": "

\n", "signature": "(\tself,\tclient: Union[prefect.client.orchestration.PrefectClient, prefect.client.orchestration.SyncPrefectClient, NoneType],\tparameters: Dict[str, Any] = None,\tflow_run_context: Optional[prefect.context.EngineContext] = None,\tparent_task_run_context: Optional[prefect.context.TaskRunContext] = None,\twait_for: Optional[Iterable[prefect.futures.PrefectFuture]] = None,\textra_task_inputs: Optional[Dict[str, Set[prefect.client.schemas.objects.TaskRunInput]]] = None) -> prefect.client.schemas.objects.TaskRun:", "funcdef": "async def" }, "prefect.Task.submit": { "fullname": "prefect.Task.submit", "modulename": "prefect", "qualname": "Task.submit", "kind": "function", "doc": "

Submit a run of the task to the engine.

\n\n

If writing an async task, this call must be awaited.

\n\n

If called from within a flow function, this will create a new task run in the backing API and submit the task to the flow's\ntask runner. This call only blocks execution while the task is being submitted;\nonce it is submitted, the flow function will continue executing. However, note\nthat the SequentialTaskRunner does not implement parallel execution for sync tasks\nand they are fully resolved on submission.

\n\n

Args:\n args: Arguments to run the task with\n return_state: Return the result of the task run wrapped in a\n Prefect State.\n wait_for: Upstream task futures to wait for before starting the task\n **kwargs: Keyword arguments to run the task with

\n\n

Returns:\n If return_state is False a future allowing asynchronous access to\n the state of the task\n If return_state is True a future wrapped in a Prefect State allowing asynchronous access to\n the state of the task

\n\n

Examples:

\n\n
Define a task\n\n>>> from prefect import task\n>>> @task\n>>> def my_task():\n>>>     return \"hello\"\n\nRun a task in a flow\n\n>>> from prefect import flow\n>>> @flow\n>>> def my_flow():\n>>>     my_task.submit()\n\nWait for a task to finish\n\n>>> @flow\n>>> def my_flow():\n>>>     my_task.submit().wait()\n\nUse the result from a task in a flow\n\n>>> @flow\n>>> def my_flow():\n>>>     print(my_task.submit().result())\n>>>\n>>> my_flow()\nhello\n\nRun an async task in an async flow\n\n>>> @task\n>>> async def my_async_task():\n>>>     pass\n>>>\n>>> @flow\n>>> async def my_flow():\n>>>     await my_async_task.submit()\n\nRun a sync task in an async flow\n\n>>> @flow\n>>> async def my_flow():\n>>>     my_task.submit()\n\nEnforce ordering between tasks that do not exchange data\n>>> @task\n>>> def task_1():\n>>>     pass\n>>>\n>>> @task\n>>> def task_2():\n>>>     pass\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     x = task_1.submit()\n>>>\n>>>     # task 2 will wait for task_1 to complete\n>>>     y = task_2.submit(wait_for=[x])\n
\n", "signature": "(\tself,\t*args: Any,\treturn_state: bool = False,\twait_for: Optional[Iterable[prefect.futures.PrefectFuture]] = None,\t**kwargs: Any) -> Union[prefect.futures.PrefectFuture, Awaitable[prefect.futures.PrefectFuture], prefect.client.schemas.objects.TaskRun, Awaitable[prefect.client.schemas.objects.TaskRun]]:", "funcdef": "def" }, "prefect.Task.map": { "fullname": "prefect.Task.map", "modulename": "prefect", "qualname": "Task.map", "kind": "function", "doc": "

Submit a mapped run of the task to a worker.

\n\n

Must be called within a flow function. If writing an async task, this\ncall must be awaited.

\n\n

Must be called with at least one iterable and all iterables must be\nthe same length. Any arguments that are not iterable will be treated as\na static value and each task run will receive the same value.

\n\n

Will create as many task runs as the length of the iterable(s) in the\nbacking API and submit the task runs to the flow's task runner. This\ncall blocks if given a future as input while the future is resolved. It\nalso blocks while the tasks are being submitted, once they are\nsubmitted, the flow function will continue executing. However, note\nthat the SequentialTaskRunner does not implement parallel execution\nfor sync tasks and they are fully resolved on submission.

\n\n

Args:\n args: Iterable and static arguments to run the tasks with\n return_state: Return a list of Prefect States that wrap the results\n of each task run.\n wait_for: Upstream task futures to wait for before starting the\n task\n **kwargs: Keyword iterable arguments to run the task with

\n\n

Returns:\n A list of futures allowing asynchronous access to the state of the\n tasks

\n\n

Examples:

\n\n
Define a task\n\n>>> from prefect import task\n>>> @task\n>>> def my_task(x):\n>>>     return x + 1\n\nCreate mapped tasks\n\n>>> from prefect import flow\n>>> @flow\n>>> def my_flow():\n>>>     my_task.map([1, 2, 3])\n\nWait for all mapped tasks to finish\n\n>>> @flow\n>>> def my_flow():\n>>>     futures = my_task.map([1, 2, 3])\n>>>     for future in futures:\n>>>         future.wait()\n>>>     # Now all of the mapped tasks have finished\n>>>     my_task(10)\n\nUse the result from mapped tasks in a flow\n\n>>> @flow\n>>> def my_flow():\n>>>     futures = my_task.map([1, 2, 3])\n>>>     for future in futures:\n>>>         print(future.result())\n>>> my_flow()\n2\n3\n4\n\nEnforce ordering between tasks that do not exchange data\n>>> @task\n>>> def task_1(x):\n>>>     pass\n>>>\n>>> @task\n>>> def task_2(y):\n>>>     pass\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     x = task_1.submit()\n>>>\n>>>     # task 2 will wait for task_1 to complete\n>>>     y = task_2.map([1, 2, 3], wait_for=[x])\n\nUse a non-iterable input as a constant across mapped tasks\n>>> @task\n>>> def display(prefix, item):\n>>>    print(prefix, item)\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     display.map(\"Check it out: \", [1, 2, 3])\n>>>\n>>> my_flow()\nCheck it out: 1\nCheck it out: 2\nCheck it out: 3\n\nUse `unmapped` to treat an iterable argument as a constant\n>>> from prefect import unmapped\n>>>\n>>> @task\n>>> def add_n_to_items(items, n):\n>>>     return [item + n for item in items]\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     return add_n_to_items.map(unmapped([10, 20]), n=[1, 2, 3])\n>>>\n>>> my_flow()\n[[11, 21], [12, 22], [13, 23]]\n
\n", "signature": "(\tself,\t*args: Any,\treturn_state: bool = False,\twait_for: Optional[Iterable[prefect.futures.PrefectFuture]] = None,\t**kwargs: Any) -> Any:", "funcdef": "def" }, "prefect.Task.serve": { "fullname": "prefect.Task.serve", "modulename": "prefect", "qualname": "Task.serve", "kind": "function", "doc": "

Serve the task using the provided task runner. This method is used to\nestablish a websocket connection with the Prefect server and listen for\nsubmitted task runs to execute.

\n\n

Args:\n task_runner: The task runner to use for serving the task. If not provided,\n the default ConcurrentTaskRunner will be used.

\n\n

Examples:\n Serve a task using the default task runner

\n\n
\n
\n
\n

@task\n def my_task():\n return 1

\n\n
>>> my_task.serve()\n
\n
\n
\n
\n", "signature": "(\tself,\ttask_runner: Optional[prefect.task_runners.BaseTaskRunner] = None) -> prefect.tasks.Task:", "funcdef": "def" }, "prefect.unmapped": { "fullname": "prefect.unmapped", "modulename": "prefect", "qualname": "unmapped", "kind": "class", "doc": "

Wrapper for iterables.

\n\n

Indicates that this input should be sent as-is to all runs created during a mapping\noperation instead of being split.
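
\n\n

Example (a sketch mirroring the usage shown for Task.map; the wrapped list is passed whole to every mapped run):\n\n>>> from prefect import flow, task, unmapped\n>>>\n>>> @task\n>>> def add_n(items, n):\n>>>     return [item + n for item in items]\n>>>\n>>> @flow\n>>> def my_flow():\n>>>     return add_n.map(unmapped([10, 20]), n=[1, 2, 3])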

\n", "bases": "prefect.utilities.annotations.BaseAnnotation[~T]" }, "prefect.unmapped.__init__": { "fullname": "prefect.unmapped.__init__", "modulename": "prefect", "qualname": "unmapped.__init__", "kind": "function", "doc": "

Create a new instance of BaseAnnotation(value).

\n", "signature": "(value)" }, "prefect.serve": { "fullname": "prefect.serve", "modulename": "prefect", "qualname": "serve", "kind": "function", "doc": "

Serve the provided list of deployments.

\n\n

Args:\n args: A list of deployments to serve.\n pause_on_shutdown: A boolean for whether or not to automatically pause\n deployment schedules on shutdown.\n print_starting_message: Whether or not to print a message to the console\n on startup.\n limit: The maximum number of runs that can be executed concurrently.\n **kwargs: Additional keyword arguments to pass to the runner.

\n\n

Examples:\n Prepare two deployments and serve them:\n

\n
import datetime\n 
\n from prefect import flow, serve\n
\n @flow\n def my_flow(name):\n print(f"hello {name}")\n
\n @flow\n def my_other_flow(name):\n print(f"goodbye {name}")\n
\n if __name__ == "__main__":\n # Run once a day\n hello_deploy = my_flow.to_deployment(\n "hello", tags=["dev"], interval=datetime.timedelta(days=1)\n )\n
\n # Run every Sunday at 4:00 AM\n bye_deploy = my_other_flow.to_deployment(\n "goodbye", tags=["dev"], cron="0 4 * * sun"\n )\n
\n serve(hello_deploy, bye_deploy)\n
\n

\n", "signature": "(\t*args: prefect.deployments.runner.RunnerDeployment,\tpause_on_shutdown: bool = True,\tprint_starting_message: bool = True,\tlimit: Optional[int] = None,\t**kwargs):", "funcdef": "async def" }, "prefect.deploy": { "fullname": "prefect.deploy", "modulename": "prefect", "qualname": "deploy", "kind": "function", "doc": "

Deploy the provided list of deployments to dynamic infrastructure via a\nwork pool.

\n\n

By default, calling this function will build a Docker image for the deployments, push it to a\nregistry, and create each deployment via the Prefect API that will run the corresponding\nflow on the given schedule.

\n\n

If you want to use an existing image, you can pass build=False to skip building and pushing\nan image.

\n\n

Args:\n *deployments: A list of deployments to deploy.\n work_pool_name: The name of the work pool to use for these deployments. Defaults to\n the value of PREFECT_DEFAULT_WORK_POOL_NAME.\n image: The name of the Docker image to build, including the registry and\n repository. Pass a DeploymentImage instance to customize the Dockerfile used\n and build arguments.\n build: Whether or not to build a new image for the flow. If False, the provided\n image will be used as-is and pulled at runtime.\n push: Whether or not to push the built image to a registry.\n print_next_steps_message: Whether or not to print a message with next steps\n after deploying the deployments.

\n\n

Returns:\n A list of deployment IDs for the created/updated deployments.

\n\n

Examples:\n Deploy a group of flows to a work pool:\n

\n
from prefect import deploy, flow\n 
\n @flow(log_prints=True)\n def local_flow():\n print("I'm a locally defined flow!")\n
\n if __name__ == "__main__":\n deploy(\n local_flow.to_deployment(name="example-deploy-local-flow"),\n flow.from_source(\n source="https://github.com/org/repo.git",\n entrypoint="flows.py:my_flow",\n ).to_deployment(\n name="example-deploy-remote-flow",\n ),\n work_pool_name="my-work-pool",\n image="my-registry/my-image:dev",\n )\n
\n

\n", "signature": "(\t*deployments: prefect.deployments.runner.RunnerDeployment,\twork_pool_name: Optional[str] = None,\timage: Union[str, prefect.deployments.runner.DeploymentImage, NoneType] = None,\tbuild: bool = True,\tpush: bool = True,\tprint_next_steps_message: bool = True,\tignore_warnings: bool = False) -> List[uuid.UUID]:", "funcdef": "async def" }, "prefect.pause_flow_run": { "fullname": "prefect.pause_flow_run", "modulename": "prefect", "qualname": "pause_flow_run", "kind": "function", "doc": "

Pauses the current flow run by blocking execution until resumed.

\n\n

When called within a flow run, execution will block and no downstream tasks will\nrun until the flow is resumed. Task runs that have already started will continue\nrunning. A timeout parameter can be passed that will fail the flow run if it has not\nbeen resumed within the specified time.

\n\n

Args:\n flow_run_id: a flow run id. If supplied, this function will attempt to pause\n the specified flow run outside of the flow run process. When paused, the\n flow run will continue execution until the NEXT task is orchestrated, at\n which point the flow will exit. Any tasks that have already started will\n run until completion. When resumed, the flow run will be rescheduled to\n finish execution. In order to pause a flow run in this way, the flow needs to\n have an associated deployment and results need to be configured with the\n persist_results option.\n timeout: the number of seconds to wait for the flow to be resumed before\n failing. Defaults to 1 hour (3600 seconds). If the pause timeout exceeds\n any configured flow-level timeout, the flow might fail even after resuming.\n poll_interval: The number of seconds between checking whether the flow has been\n resumed. Defaults to 10 seconds.\n reschedule: Flag that will reschedule the flow run if resumed. Instead of\n blocking execution, the flow will gracefully exit (with no result returned).\n To use this flag, a flow needs to have an associated deployment and\n results need to be configured with the persist_results option.\n key: An optional key to prevent calling pauses more than once. This defaults to\n the number of pauses observed by the flow so far, and prevents pauses that\n use the \"reschedule\" option from running the same pause twice. A custom key\n can be supplied for custom pausing behavior.\n wait_for_input: a subclass of RunInput or any type supported by\n Pydantic. If provided when the flow pauses, the flow will wait for the\n input to be provided before resuming. If the flow is resumed without\n providing the input, the flow will fail. If the flow is resumed with the\n input, the flow will resume and the input will be loaded and returned\n from this function.

\n\n

Example:

\n\n
\n
from time import sleep\n\nfrom prefect import flow, pause_flow_run, task\nfrom prefect.client.schemas.objects import StateType\n\n@task\ndef task_one():\n    for i in range(3):\n        sleep(1)\n\n@flow\ndef my_flow():\n    terminal_state = task_one.submit(return_state=True)\n    if terminal_state.type == StateType.COMPLETED:\n        print("Task one succeeded! Pausing flow run..")\n        pause_flow_run(timeout=2)\n    else:\n        print("Task one failed. Skipping pause flow run..")\n
\n
\n", "signature": "(\twait_for_input: Optional[Type[~T]] = None,\tflow_run_id: uuid.UUID = None,\ttimeout: int = 3600,\tpoll_interval: int = 10,\treschedule: bool = False,\tkey: str = None) -> Optional[~T]:", "funcdef": "def" }, "prefect.resume_flow_run": { "fullname": "prefect.resume_flow_run", "modulename": "prefect", "qualname": "resume_flow_run", "kind": "function", "doc": "

Resumes a paused flow.

\n\n

Args:\n flow_run_id: the flow_run_id to resume\n run_input: a dictionary of inputs to provide to the flow run.
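
\n\n

Example (a sketch; flow_run_id is assumed to reference a paused flow run, and the call is awaited in an async context):\n\n>>> from prefect import resume_flow_run\n>>>\n>>> await resume_flow_run(flow_run_id, run_input={\"approved\": True})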

\n", "signature": "(flow_run_id, run_input: Optional[Dict] = None):", "funcdef": "async def" }, "prefect.suspend_flow_run": { "fullname": "prefect.suspend_flow_run", "modulename": "prefect", "qualname": "suspend_flow_run", "kind": "function", "doc": "

Suspends a flow run by stopping code execution until resumed.

\n\n

When suspended, the flow run will continue execution until the NEXT task is\norchestrated, at which point the flow will exit. Any tasks that have\nalready started will run until completion. When resumed, the flow run will\nbe rescheduled to finish execution. In order to suspend a flow run in this\nway, the flow needs to have an associated deployment and results need to be\nconfigured with the persist_results option.

\n\n

Args:\n flow_run_id: a flow run id. If supplied, this function will attempt to\n suspend the specified flow run. If not supplied will attempt to\n suspend the current flow run.\n timeout: the number of seconds to wait for the flow to be resumed before\n failing. Defaults to 1 hour (3600 seconds). If the pause timeout\n exceeds any configured flow-level timeout, the flow might fail even\n after resuming.\n key: An optional key to prevent calling suspend more than once. This\n defaults to a random string and prevents suspends from running the\n same suspend twice. A custom key can be supplied for custom\n suspending behavior.\n wait_for_input: a subclass of RunInput or any type supported by\n Pydantic. If provided when the flow suspends, the flow will remain\n suspended until receiving the input before resuming. If the flow is\n resumed without providing the input, the flow will fail. If the flow is\n resumed with the input, the flow will resume and the input will be\n loaded and returned from this function.
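
\n\n

Example (a minimal sketch suspending the current flow run; assumes the flow has an associated deployment and persisted results):\n\n>>> from prefect import flow, suspend_flow_run\n>>>\n>>> @flow\n>>> async def my_flow():\n>>>     await suspend_flow_run(timeout=600)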

\n", "signature": "(\twait_for_input: Optional[Type[~T]] = None,\tflow_run_id: Optional[uuid.UUID] = None,\ttimeout: Optional[int] = 3600,\tkey: Optional[str] = None,\tclient: prefect.client.orchestration.PrefectClient = None) -> Optional[~T]:", "funcdef": "async def" } }, "docInfo": { "prefect": { "qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.allow_failure": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 74 }, "prefect.allow_failure.__init__": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 9 }, "prefect.flow": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 988, "bases": 0, "doc": 909 }, "prefect.Flow": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 601 }, "prefect.Flow.__init__": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 995, "bases": 0, "doc": 3 }, "prefect.Flow.name": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.flow_run_name": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.task_runner": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.log_prints": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.description": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.fn": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.isasync": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.version": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.timeout_seconds": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.retries": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.retry_delay_seconds": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.parameters": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.should_validate_parameters": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.persist_result": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.result_storage": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.result_serializer": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.cache_result_in_memory": { "qualname": 5, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.on_completion": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, 
"doc": 3 }, "prefect.Flow.on_failure": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.on_cancellation": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.on_crashed": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.on_running": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Flow.with_options": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 932, "bases": 0, "doc": 416 }, "prefect.Flow.validate_parameters": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 54 }, "prefect.Flow.serialize_parameters": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 41 }, "prefect.Flow.to_deployment": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 1046, "bases": 0, "doc": 628 }, "prefect.Flow.serve": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 1036, "bases": 0, "doc": 883 }, "prefect.Flow.from_source": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 95, "bases": 0, "doc": 666 }, "prefect.Flow.deploy": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 971, "bases": 0, "doc": 1149 }, "prefect.Flow.visualize": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 25, "bases": 0, "doc": 60 }, "prefect.get_client": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 105, "bases": 0, "doc": 132 }, "prefect.get_run_logger": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, "doc": 123 }, "prefect.Manifest": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 9 }, "prefect.Manifest.model_config": { "qualname": 3, "fullname": 4, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Manifest.flow_name": { "qualname": 3, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Manifest.import_path": { "qualname": 3, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Manifest.parameter_openapi_schema": { "qualname": 4, "fullname": 5, "annotation": 5, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Manifest.model_fields": { "qualname": 3, "fullname": 4, "annotation": 6, "default_value": 49, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 7, "doc": 8 }, "prefect.State.type": { "qualname": 2, "fullname": 3, "annotation": 6, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.name": { "qualname": 2, "fullname": 3, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.timestamp": { "qualname": 2, "fullname": 3, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.message": { "qualname": 2, "fullname": 3, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.state_details": { "qualname": 3, "fullname": 4, "annotation": 6, 
"default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.data": { "qualname": 2, "fullname": 3, "annotation": 10, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.State.result": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 367 }, "prefect.State.to_state_create": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 60 }, "prefect.State.default_name_from_type": { "qualname": 5, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 33, "bases": 0, "doc": 3 }, "prefect.State.default_scheduled_start_time": { "qualname": 5, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 32 }, "prefect.State.is_scheduled": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_pending": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_running": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_completed": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_failed": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_crashed": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_cancelled": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_cancelling": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_final": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.is_paused": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3 }, "prefect.State.copy": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 79, "bases": 0, "doc": 26 }, "prefect.tags": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 255 }, "prefect.task": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 905, "bases": 0, "doc": 932 }, "prefect.Task": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 712 }, "prefect.Task.__init__": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 968, "bases": 0, "doc": 3 }, "prefect.Task.description": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.fn": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.isasync": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.task_run_name": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.version": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.log_prints": { "qualname": 3, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.tags": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.cache_key_fn": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.cache_expiration": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.refresh_cache": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.retries": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.retry_jitter_factor": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.persist_result": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.result_storage": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.result_serializer": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.result_storage_key": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.cache_result_in_memory": { "qualname": 5, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.timeout_seconds": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.on_completion": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.on_failure": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.retry_condition_fn": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.viz_return_value": { "qualname": 4, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3 }, "prefect.Task.with_options": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 757, "bases": 0, "doc": 640 }, "prefect.Task.create_run": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 296, "bases": 0, "doc": 3 }, "prefect.Task.submit": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 191, "bases": 0, "doc": 479 }, "prefect.Task.map": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 99, "bases": 0, "doc": 662 }, "prefect.Task.serve": { "qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 100 }, "prefect.unmapped": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 30 }, "prefect.unmapped.__init__": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 9 }, "prefect.serve": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 420 }, "prefect.deploy": { "qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 201, "bases": 0, "doc": 487 }, 
"prefect.pause_flow_run": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 147, "bases": 0, "doc": 582 }, "prefect.resume_flow_run": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 32 }, "prefect.suspend_flow_run": { "qualname": 3, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 161, "bases": 0, "doc": 264 } }, "length": 104, "save": true }, "index": { "qualname": { "root": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 4, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 } }, "df": 2 } } } } }, "f": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 } }, "df": 4 } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_failed": { "tf": 1 } }, "df": 1 } } } }, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.retry_jitter_factor": { "tf": 1 } }, "df": 1 } } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.name": { "tf": 1 }, "prefect.Flow.flow_run_name": { "tf": 1.4142135623730951 }, "prefect.Flow.task_runner": { "tf": 1 }, "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Flow.description": { "tf": 1 }, "prefect.Flow.fn": { "tf": 1 }, "prefect.Flow.isasync": { "tf": 1 }, "prefect.Flow.version": { "tf": 1 }, "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Flow.retries": { "tf": 1 }, "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Flow.parameters": { "tf": 1 }, "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Flow.on_completion": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Flow.on_cancellation": { "tf": 1 }, "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.Flow.on_running": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 37 } } }, "n": { "docs": { "prefect.Flow.fn": { "tf": 1 }, "prefect.Task.fn": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 4 }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": { "prefect.Flow.from_source": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 } }, "df": 2 } } }, "i": { 
"docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.State.is_final": { "tf": 1 } }, "df": 1 } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 2, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 4 } } }, "s": { "docs": { "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 } }, "df": 10, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Flow.isasync": { "tf": 1 }, "prefect.Task.isasync": { "tf": 1 } }, "df": 2 } } } } } }, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest.import_path": { "tf": 1 } }, "df": 1 } } } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.name": { "tf": 1 }, "prefect.Flow.flow_run_name": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1 } }, "df": 6 } } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.flow_run_name": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 7, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.task_runner": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.on_running": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 } }, "df": 2 } } } } } }, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.retries": { "tf": 1 }, "prefect.Task.retries": { "tf": 1 } }, "df": 2 } } }, "y": { "docs": { "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Task.retry_jitter_factor": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 3 } }, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } } }, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 }, 
"prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 10 } }, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.resume_flow_run": { "tf": 1 } }, "df": 1 } } } }, "f": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Task.refresh_cache": { "tf": 1 } }, "df": 1 } } } } } } }, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": { "prefect.Flow.task_runner": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.description": { "tf": 1 }, "prefect.Task.fn": { "tf": 1 }, "prefect.Task.isasync": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1.4142135623730951 }, "prefect.Task.version": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 }, "prefect.Task.tags": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.cache_expiration": { "tf": 1 }, "prefect.Task.refresh_cache": { "tf": 1 }, "prefect.Task.retries": { "tf": 1 }, "prefect.Task.retry_jitter_factor": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 }, "prefect.Task.viz_return_value": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 } }, "df": 31 } }, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.tags": { "tf": 1 }, "prefect.Task.tags": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 } }, "df": 2 } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": { "prefect.State.timestamp": { "tf": 1 } }, "df": 1 } } } } } } } }, "o": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 } }, "df": 2 }, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.type": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 } }, "df": 2 } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 } }, "df": 2, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } }, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 } }, "df": 2 } } } } }, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { 
"docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1, "s": { "docs": { "prefect.Flow.parameters": { "tf": 1 }, "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 4 } } } } } } } }, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Manifest.import_path": { "tf": 1 } }, "df": 1 } }, "u": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1, "d": { "docs": { "prefect.State.is_paused": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 } }, "df": 2 } } } } }, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.is_pending": { "tf": 1 } }, "df": 1 } } } } } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.description": { "tf": 1 }, "prefect.Task.description": { "tf": 1 } }, "df": 2 } } } } } } } } }, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.retry_delay_seconds": { "tf": 1 } }, "df": 1 } } }, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 } }, "df": 1 } } } } } } } }, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.state_details": { "tf": 1 } }, "df": 1 } } } } }, "f": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 2 } } } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.version": { "tf": 1 }, "prefect.Task.version": { "tf": 1 } }, "df": 2 } } } } } }, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 2 } } } } }, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": { 
"prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } }, "z": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } }, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 } }, "df": 3 } } } } }, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1, "r": { "docs": { "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 } }, "df": 2 } } } } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 3 } } } }, "h": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.should_validate_parameters": { "tf": 1 } }, "df": 1 } } } } }, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 } }, "df": 3 } } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State": { "tf": 1 }, "prefect.State.type": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.timestamp": { "tf": 1 }, "prefect.State.message": { "tf": 1 }, "prefect.State.state_details": { "tf": 1.4142135623730951 }, "prefect.State.data": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 }, "prefect.State.copy": { "tf": 1 } }, "df": 22 } }, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1 } } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } }, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 } }, "df": 2 } } } } } } } }, "u": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.submit": { "tf": 1 } }, "df": 1 } } } }, "s": { "docs": 
{}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.suspend_flow_run": { "tf": 1 } }, "df": 1 } } } } } } }, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.cache_expiration": { "tf": 1 }, "prefect.Task.refresh_cache": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 5 } } }, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.on_cancellation": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_cancelled": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.is_cancelling": { "tf": 1 } }, "df": 1 } } } } } } } } }, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.on_completion": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 } }, "df": 2 } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_completed": { "tf": 1 } }, "df": 1 } } } } } } }, "n": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Manifest.model_config": { "tf": 1 } }, "df": 1 } } }, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 1 } } } } } } }, "p": { "docs": {}, "df": 0, "y": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } }, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 } }, "df": 2 } } } } }, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.to_state_create": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 2 } } } } }, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1 } } } } } }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 2 } } } }, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.message": { "tf": 1 } }, "df": 1 } } } } } }, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest": { "tf": 1 }, "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, 
"prefect.Manifest.import_path": { "tf": 1 }, "prefect.Manifest.parameter_openapi_schema": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 6 } } } } } }, "p": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } }, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 2 } } } } }, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.on_completion": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Flow.on_cancellation": { "tf": 1 }, "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.Flow.on_running": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 } }, "df": 7 }, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } } } } }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } } } } } } }, "w": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } } } }, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 } }, "df": 2 } } }, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 } }, "df": 2 } } }, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.cache_expiration": { "tf": 1 } }, "df": 1 } } } } } } } } } }, "j": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.retry_jitter_factor": { "tf": 1 } }, "df": 1 } } } } } }, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.unmapped": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 2 } } } } } } } } } }, "fullname": { "root": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 4, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect": { "tf": 1 }, "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.name": { "tf": 1 }, "prefect.Flow.flow_run_name": { "tf": 1 }, "prefect.Flow.task_runner": { "tf": 1 }, "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Flow.description": { "tf": 1 }, "prefect.Flow.fn": { "tf": 1 }, 
"prefect.Flow.isasync": { "tf": 1 }, "prefect.Flow.version": { "tf": 1 }, "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Flow.retries": { "tf": 1 }, "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Flow.parameters": { "tf": 1 }, "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Flow.on_completion": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Flow.on_cancellation": { "tf": 1 }, "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.Flow.on_running": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.Manifest": { "tf": 1 }, "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.Manifest.import_path": { "tf": 1 }, "prefect.Manifest.parameter_openapi_schema": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 }, "prefect.State": { "tf": 1 }, "prefect.State.type": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.timestamp": { "tf": 1 }, "prefect.State.message": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 }, "prefect.State.data": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.description": { "tf": 1 }, "prefect.Task.fn": { "tf": 1 }, "prefect.Task.isasync": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1 }, "prefect.Task.version": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 }, "prefect.Task.tags": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.cache_expiration": { "tf": 1 }, "prefect.Task.refresh_cache": { "tf": 1 }, "prefect.Task.retries": { "tf": 1 }, "prefect.Task.retry_jitter_factor": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 }, "prefect.Task.viz_return_value": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.unmapped": { "tf": 
1 }, "prefect.unmapped.__init__": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 104 } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 } }, "df": 2 } } } } }, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1, "s": { "docs": { "prefect.Flow.parameters": { "tf": 1 }, "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 4 } } } } } } } }, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Manifest.import_path": { "tf": 1 } }, "df": 1 } }, "u": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1, "d": { "docs": { "prefect.State.is_paused": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 } }, "df": 2 } } } } }, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.is_pending": { "tf": 1 } }, "df": 1 } } } } } } }, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 } }, "df": 2 } } } } }, "f": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 } }, "df": 4 } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_failed": { "tf": 1 } }, "df": 1 } } } }, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.retry_jitter_factor": { "tf": 1 } }, "df": 1 } } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.name": { "tf": 1 }, "prefect.Flow.flow_run_name": { "tf": 1.4142135623730951 }, "prefect.Flow.task_runner": { "tf": 1 }, "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Flow.description": { "tf": 1 }, "prefect.Flow.fn": { "tf": 1 }, "prefect.Flow.isasync": { "tf": 1 }, "prefect.Flow.version": { "tf": 1 }, "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Flow.retries": { "tf": 1 }, "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Flow.parameters": { "tf": 1 }, "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Flow.on_completion": { 
"tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Flow.on_cancellation": { "tf": 1 }, "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.Flow.on_running": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 37 } } }, "n": { "docs": { "prefect.Flow.fn": { "tf": 1 }, "prefect.Task.fn": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 4 }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": { "prefect.Flow.from_source": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.State.is_final": { "tf": 1 } }, "df": 1 } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 2, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 4 } } }, "s": { "docs": { "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 } }, "df": 10, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Flow.isasync": { "tf": 1 }, "prefect.Task.isasync": { "tf": 1 } }, "df": 2 } } } } } }, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest.import_path": { "tf": 1 } }, "df": 1 } } } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.name": { "tf": 1 }, "prefect.Flow.flow_run_name": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1 } }, "df": 6 } } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.flow_run_name": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 7, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.task_runner": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": 
{}, "df": 0, "g": { "docs": { "prefect.Flow.on_running": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 } }, "df": 2 } } } } } }, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.retries": { "tf": 1 }, "prefect.Task.retries": { "tf": 1 } }, "df": 2 } } }, "y": { "docs": { "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Task.retry_jitter_factor": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 3 } }, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } } }, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.persist_result": { "tf": 1 }, "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 10 } }, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.resume_flow_run": { "tf": 1 } }, "df": 1 } } } }, "f": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Task.refresh_cache": { "tf": 1 } }, "df": 1 } } } } } } }, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": { "prefect.Flow.task_runner": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.description": { "tf": 1 }, "prefect.Task.fn": { "tf": 1 }, "prefect.Task.isasync": { "tf": 1 }, "prefect.Task.task_run_name": { "tf": 1.4142135623730951 }, "prefect.Task.version": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 }, "prefect.Task.tags": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.cache_expiration": { "tf": 1 }, "prefect.Task.refresh_cache": { "tf": 1 }, "prefect.Task.retries": { "tf": 1 }, "prefect.Task.retry_jitter_factor": { "tf": 1 }, "prefect.Task.persist_result": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 }, "prefect.Task.retry_condition_fn": { "tf": 1 }, "prefect.Task.viz_return_value": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 } }, "df": 31 } }, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.tags": { "tf": 1 }, "prefect.Task.tags": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 } }, "df": 2 } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": { 
"prefect.State.timestamp": { "tf": 1 } }, "df": 1 } } } } } } } }, "o": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 } }, "df": 2 }, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.type": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 } }, "df": 2 } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.log_prints": { "tf": 1 }, "prefect.Task.log_prints": { "tf": 1 } }, "df": 2, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.description": { "tf": 1 }, "prefect.Task.description": { "tf": 1 } }, "df": 2 } } } } } } } } }, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.retry_delay_seconds": { "tf": 1 } }, "df": 1 } } }, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 } }, "df": 1 } } } } } } } }, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.state_details": { "tf": 1 } }, "df": 1 } } } } }, "f": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 2 } } } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.version": { "tf": 1 }, "prefect.Task.version": { "tf": 1 } }, "df": 2 } } } } } }, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.should_validate_parameters": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 2 } } } } }, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } }, "z": { "docs": { "prefect.Task.viz_return_value": { "tf": 1 } }, "df": 1 } } }, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.timeout_seconds": { "tf": 1 }, "prefect.Flow.retry_delay_seconds": { "tf": 1 }, "prefect.Task.timeout_seconds": { "tf": 1 } }, "df": 3 } } } } }, 
"r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1, "r": { "docs": { "prefect.Flow.result_serializer": { "tf": 1 }, "prefect.Task.result_serializer": { "tf": 1 } }, "df": 2 } } } } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 3 } } } }, "h": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.should_validate_parameters": { "tf": 1 } }, "df": 1 } } } } }, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.result_storage": { "tf": 1 }, "prefect.Task.result_storage": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 } }, "df": 3 } } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State": { "tf": 1 }, "prefect.State.type": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.timestamp": { "tf": 1 }, "prefect.State.message": { "tf": 1 }, "prefect.State.state_details": { "tf": 1.4142135623730951 }, "prefect.State.data": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 }, "prefect.State.copy": { "tf": 1 } }, "df": 22 } }, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1 } } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } }, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 } }, "df": 2 } } } } } } } }, "u": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.submit": { "tf": 1 } }, "df": 1 } } } }, "s": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.suspend_flow_run": { "tf": 1 } }, "df": 1 } } } } } } }, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.cache_expiration": { "tf": 1 }, "prefect.Task.refresh_cache": { "tf": 1 
}, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 5 } } }, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.on_cancellation": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_cancelled": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.is_cancelling": { "tf": 1 } }, "df": 1 } } } } } } } } }, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.on_completion": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 } }, "df": 2 } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.is_completed": { "tf": 1 } }, "df": 1 } } } } } } }, "n": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Manifest.model_config": { "tf": 1 } }, "df": 1 } } }, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.retry_condition_fn": { "tf": 1 } }, "df": 1 } } } } } } }, "p": { "docs": {}, "df": 0, "y": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } }, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 } }, "df": 2 } } } } }, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.to_state_create": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 2 } } } } }, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1 } } } } } }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.cache_result_in_memory": { "tf": 1 }, "prefect.Task.cache_result_in_memory": { "tf": 1 } }, "df": 2 } } } }, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.message": { "tf": 1 } }, "df": 1 } } } } } }, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest": { "tf": 1 }, "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.Manifest.import_path": { "tf": 1 }, "prefect.Manifest.parameter_openapi_schema": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 6 } } } } } }, "p": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } }, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 2 } } } } }, "o": { "docs": {}, "df": 0, "n": { "docs": { 
"prefect.Flow.on_completion": { "tf": 1 }, "prefect.Flow.on_failure": { "tf": 1 }, "prefect.Flow.on_cancellation": { "tf": 1 }, "prefect.Flow.on_crashed": { "tf": 1 }, "prefect.Flow.on_running": { "tf": 1 }, "prefect.Task.on_completion": { "tf": 1 }, "prefect.Task.on_failure": { "tf": 1 } }, "df": 7 }, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } } } } }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } } } } } } }, "w": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } } } }, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 } }, "df": 2 } } }, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Task.cache_key_fn": { "tf": 1 }, "prefect.Task.result_storage_key": { "tf": 1 } }, "df": 2 } } }, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.cache_expiration": { "tf": 1 } }, "df": 1 } } } } } } } } } }, "j": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.retry_jitter_factor": { "tf": 1 } }, "df": 1 } } } } } }, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.unmapped": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 2 } } } } } } } } } }, "annotation": { "root": { "docs": { "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.Manifest.import_path": { "tf": 1 }, "prefect.Manifest.parameter_openapi_schema": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 }, "prefect.State.type": { "tf": 1 }, "prefect.State.name": { "tf": 1 }, "prefect.State.timestamp": { "tf": 1 }, "prefect.State.message": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 }, "prefect.State.data": { "tf": 1 } }, "df": 11, "c": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Manifest.model_config": { "tf": 1 } }, "df": 1 } } } } } } } }, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } } } } } 
} } } } } } } }, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.type": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 } }, "df": 2 } } } } }, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Manifest.model_config": { "tf": 1 } }, "df": 1, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest.model_config": { "tf": 1 } }, "df": 1 } } } } } } } } }, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } } } } } } } } }, "v": { "1": { "docs": { "prefect.Manifest.model_config": { "tf": 1 }, "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 2 }, "docs": {}, "df": 0 }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.flow_name": { "tf": 1 }, "prefect.Manifest.import_path": { "tf": 1 } }, "df": 2 }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.type": { "tf": 1 } }, "df": 1 } } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.state_details": { "tf": 1 } }, "df": 1 } } } } } } } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.type": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 } }, "df": 2 } } } } } } }, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 }, "prefect.State.type": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 }, "prefect.State.data": { "tf": 1 } }, "df": 4 } } } } } }, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } }, "y": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } } } } }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "m": { "docs": { "prefect.State.timestamp": { "tf": 1 } }, "df": 1 } } } } } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 
0, "s": { "docs": { "prefect.Manifest.parameter_openapi_schema": { "tf": 1 } }, "df": 1 } } } } } } } }, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } }, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "o": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } } } } } } }, "o": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "j": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.type": { "tf": 1 }, "prefect.State.state_details": { "tf": 1 } }, "df": 2 } } } } } }, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.name": { "tf": 1 }, "prefect.State.message": { "tf": 1 } }, "df": 2 } } } } } } } } } } } }, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.timestamp": { "tf": 1.4142135623730951 } }, "df": 1 } } } } }, "a": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1, "d": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "~": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } }, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } } } }, "o": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } } } }, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } }, "b": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "~": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } } } } } } 
} } } }, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "y": { "docs": { "prefect.State.data": { "tf": 1 } }, "df": 1 } } } } }, "default_value": { "root": { "docs": { "prefect.Manifest.model_fields": { "tf": 2.23606797749979 } }, "df": 1, "x": { "2": { "7": { "docs": { "prefect.Manifest.model_fields": { "tf": 3.4641016151377544 } }, "df": 1 }, "docs": {}, "df": 0 }, "docs": {}, "df": 0 }, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Manifest.model_fields": { "tf": 2.23606797749979 } }, "df": 1 } } } }, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.7320508075688772 } }, "df": 1 } } } } } } } } } }, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.7320508075688772 } }, "df": 1 } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.7320508075688772 } }, "df": 1 } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } }, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.7320508075688772 } }, "df": 1 } } } } } } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } }, "p": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } }, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Manifest.model_fields": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } } }, "o": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": { "prefect.Manifest.model_fields": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } } } } }, "signature": { "root": { "1": { "0": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 }, "docs": {}, "df": 0 }, "3": { "6": { "0": { "0": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, 
"prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 }, "docs": {}, "df": 0 }, "docs": {}, "df": 0 }, "9": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 }, "docs": {}, "df": 0 }, "docs": { "prefect.allow_failure.__init__": { "tf": 2.8284271247461903 }, "prefect.flow": { "tf": 27.982137159266443 }, "prefect.Flow.__init__": { "tf": 28.035691537752374 }, "prefect.Flow.with_options": { "tf": 26.551836094703507 }, "prefect.Flow.validate_parameters": { "tf": 6.324555320336759 }, "prefect.Flow.serialize_parameters": { "tf": 6.324555320336759 }, "prefect.Flow.to_deployment": { "tf": 28.982753492378876 }, "prefect.Flow.serve": { "tf": 28.879058156387302 }, "prefect.Flow.from_source": { "tf": 8.888194417315589 }, "prefect.Flow.deploy": { "tf": 27.92848008753788 }, "prefect.Flow.visualize": { "tf": 4.69041575982343 }, "prefect.get_client": { "tf": 9.219544457292887 }, "prefect.get_run_logger": { "tf": 7.745966692414834 }, "prefect.State.result": { "tf": 7.745966692414834 }, "prefect.State.to_state_create": { "tf": 3.1622776601683795 }, "prefect.State.default_name_from_type": { "tf": 5.385164807134504 }, "prefect.State.default_scheduled_start_time": { "tf": 3.7416573867739413 }, "prefect.State.is_scheduled": { "tf": 3.4641016151377544 }, "prefect.State.is_pending": { "tf": 3.4641016151377544 }, "prefect.State.is_running": { "tf": 3.4641016151377544 }, "prefect.State.is_completed": { "tf": 3.4641016151377544 }, "prefect.State.is_failed": { "tf": 3.4641016151377544 }, "prefect.State.is_crashed": { "tf": 3.4641016151377544 }, "prefect.State.is_cancelled": { "tf": 3.4641016151377544 }, "prefect.State.is_cancelling": { "tf": 3.4641016151377544 }, "prefect.State.is_final": { "tf": 3.4641016151377544 }, "prefect.State.is_paused": { "tf": 3.4641016151377544 }, "prefect.State.copy": { "tf": 8.18535277187245 }, "prefect.tags": { "tf": 6 }, "prefect.task": { "tf": 26.92582403567252 }, "prefect.Task.__init__": { "tf": 27.85677655436824 }, "prefect.Task.with_options": { "tf": 22.67156809750927 }, "prefect.Task.create_run": { "tf": 15.362291495737216 }, "prefect.Task.submit": { "tf": 12.449899597988733 }, "prefect.Task.map": { "tf": 9 }, "prefect.Task.serve": { "tf": 7 }, "prefect.unmapped.__init__": { "tf": 2.8284271247461903 }, "prefect.serve": { "tf": 9.273618495495704 }, "prefect.deploy": { "tf": 12.727922061357855 }, "prefect.pause_flow_run": { "tf": 10.908712114635714 }, "prefect.resume_flow_run": { "tf": 5.385164807134504 }, "prefect.suspend_flow_run": { "tf": 11.445523142259598 } }, "df": 42, "v": { "docs": { "prefect.State.default_name_from_type": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 } }, "df": 5, "s": { "docs": { "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 } }, "df": 3 } } } } } }, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { 
"docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2 } } } } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 } }, "df": 8 } } } } } }, "i": { "docs": {}, "df": 0, "z": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } }, "f": { "docs": { "prefect.Flow.from_source": { "tf": 1.4142135623730951 } }, "df": 1, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 5 }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow.__init__": { "tf": 2.449489742783178 }, "prefect.Flow.with_options": { "tf": 2.449489742783178 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 7, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.__init__": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 } }, "df": 3 } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.__init__": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 9 } } } }, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3, "s": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 7 } } } } } } } } }, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } }, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1 }, 
"prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 7 } } } } }, "l": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 10 } } }, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } }, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } }, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 1 } }, "df": 3 } } } } } } }, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 4.123105625617661 }, "prefect.Flow.__init__": { "tf": 4 }, "prefect.Flow.with_options": { "tf": 3.872983346207417 }, "prefect.Flow.to_deployment": { "tf": 3.872983346207417 }, "prefect.Flow.serve": { "tf": 3.7416573867739413 }, "prefect.Flow.deploy": { "tf": 4 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 4.69041575982343 }, "prefect.Task.__init__": { "tf": 4.47213595499958 }, "prefect.Task.with_options": { "tf": 3.4641016151377544 }, "prefect.Task.create_run": { "tf": 2.23606797749979 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 2 } }, "df": 22, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2.8284271247461903 }, "prefect.Flow.__init__": { "tf": 3 }, "prefect.Flow.with_options": { "tf": 3 }, "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.__init__": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 2.23606797749979 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 12 } } } } } }, "t": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.with_options": { "tf": 2 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 2 } } } }, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, 
"e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } }, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 10 } } }, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } }, "w": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1 } } }, "o": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.flow": { "tf": 3.3166247903554 }, "prefect.Flow.__init__": { "tf": 3.1622776601683795 }, "prefect.Flow.with_options": { "tf": 2.449489742783178 }, "prefect.Flow.to_deployment": { "tf": 3.3166247903554 }, "prefect.Flow.serve": { "tf": 3.1622776601683795 }, "prefect.Flow.deploy": { "tf": 3.605551275463989 }, "prefect.get_client": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 3 }, "prefect.Task.__init__": { "tf": 4.123105625617661 }, "prefect.Task.with_options": { "tf": 2.8284271247461903 }, "prefect.Task.create_run": { "tf": 2 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 2.23606797749979 } }, "df": 21, "[": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 2 } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } }, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } } } } } } } }, "n": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.__init__": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 } }, "df": 9 }, "b": { "docs": {}, "df": 0, "j": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 
0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 3.872983346207417 }, "prefect.Flow.__init__": { "tf": 3.872983346207417 }, "prefect.Flow.with_options": { "tf": 3.872983346207417 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.__init__": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 2.449489742783178 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.4142135623730951 } }, "df": 11 } } } } } }, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 2.6457513110645907 }, "prefect.Flow.__init__": { "tf": 2.6457513110645907 }, "prefect.Flow.with_options": { "tf": 2.6457513110645907 }, "prefect.Flow.validate_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.serialize_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 3.3166247903554 }, "prefect.Flow.serve": { "tf": 2.8284271247461903 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 3 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 3.3166247903554 }, "prefect.Task.__init__": { "tf": 3.3166247903554 }, "prefect.Task.with_options": { "tf": 3 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 20 }, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 7 } } } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.__init__": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 8 } }, "r": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } } } } } }, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } }, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "o": { 
"docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 6 } } } } }, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 6, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } } }, "l": { "docs": {}, "df": 0, "f": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 } }, "df": 25 } }, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 } }, "df": 2, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } }, "t": { "docs": { "prefect.tags": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 2, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1 } } } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3, "s": { "docs": { "prefect.flow": { "tf": 3.872983346207417 }, "prefect.Flow.__init__": { "tf": 
3.872983346207417 }, "prefect.Flow.with_options": { "tf": 3.872983346207417 }, "prefect.Flow.to_deployment": { "tf": 4.123105625617661 }, "prefect.Flow.serve": { "tf": 4.123105625617661 }, "prefect.Flow.deploy": { "tf": 3.605551275463989 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.__init__": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 2.449489742783178 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.4142135623730951 } }, "df": 11 } } }, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 3 }, "prefect.Flow.serve": { "tf": 3 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 } }, "df": 3 } } } } } } } }, "h": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } } } } } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } } } }, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 2 } } } } } } } } } } } } } } } } }, "r": { "docs": { "prefect.Flow.__init__": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 } }, "df": 3, "u": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 10, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 10, "s": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 } }, "df": 4, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { 
"tf": 1 } }, "df": 1 } } } } } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 } }, "df": 3 } } } }, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } } } } }, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } }, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 6 } }, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "n": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 5 } } } }, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 2 }, "prefect.Flow.__init__": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.__init__": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 2.23606797749979 } }, "df": 6 } } }, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } } } } }, "a": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } } } } } } } } } } }, "f": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 
} }, "df": 3 } } } } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } }, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } } }, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.__init__": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 2.6457513110645907 }, "prefect.Flow.serve": { "tf": 2.6457513110645907 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 2 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.__init__": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 16, "[": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.__init__": { "tf": 1 } }, "df": 1 } } } } } } } } } } } }, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 2 } } } } } } }, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } } } } } } }, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.with_options": { "tf": 2 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 2 } } } } } } } }, "u": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 4 } } }, "p": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } } }, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, 
"df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow.__init__": { "tf": 2.6457513110645907 }, "prefect.Flow.with_options": { "tf": 2.449489742783178 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.__init__": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 2.23606797749979 } }, "df": 6, "[": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } } } } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 2 }, "prefect.Task.__init__": { "tf": 2 }, "prefect.Task.with_options": { "tf": 2 } }, "df": 6 } } }, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 } }, "df": 3 } } } } } } } } } } }, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 4 } } }, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 3.872983346207417 }, "prefect.Flow.__init__": { "tf": 3.872983346207417 }, "prefect.Flow.with_options": { "tf": 3.872983346207417 }, "prefect.Flow.to_deployment": { "tf": 3 }, "prefect.Flow.serve": { "tf": 3 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 }, "prefect.get_client": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.__init__": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 2.449489742783178 }, "prefect.Task.create_run": { "tf": 2.23606797749979 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 13 } } } }, "s": { "docs": { "prefect.Flow.from_source": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 3 } }, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 } }, "df": 2 } } } } } } } } } } } } } } } } }, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 2 } }, "df": 5 } } } }, "d": { "docs": {}, "df": 0, 
"i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } }, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } } }, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 } }, "df": 3 } } } } }, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6, "t": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow.__init__": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 2 }, "prefect.Task.__init__": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 12, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 4, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } 
} } } } } } } } } } }, "p": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3, "s": { "docs": { "prefect.Task.create_run": { "tf": 1 } }, "df": 1 } } } } }, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 8 } } } } } } }, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 }, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } }, "g": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } }, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } }, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 9 } } } } } } } } }, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2 } }, "df": 3, "e": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { 
"docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } }, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } } } }, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.7320508075688772 } }, "df": 5, "e": { "docs": {}, "df": 0, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.serialize_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.resume_flow_run": { "tf": 1 } }, "df": 12 } } }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } }, "t": { "docs": { "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 2, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow.__init__": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2 }, "prefect.Task.__init__": { "tf": 2 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.Task.create_run": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 
1.7320508075688772 } }, "df": 8, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": { "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 } }, "df": 5, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "t": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 4 } } } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.create_run": { "tf": 1 } }, "df": 1 } } } } } } } }, "s": { "docs": { "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.serve": { "tf": 1 } }, "df": 4 } } }, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 7 } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 8 } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.7320508075688772 } }, "df": 9 } }, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 2.23606797749979 }, "prefect.Flow.serve": { "tf": 2.23606797749979 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 } }, "df": 3 } } } } } } }, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 7 } } } 
}, "p": { "docs": { "prefect.Flow.__init__": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 } }, "df": 2, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 4.358898943540674 }, "prefect.Flow.__init__": { "tf": 4.358898943540674 }, "prefect.Flow.with_options": { "tf": 4.58257569495584 }, "prefect.Flow.to_deployment": { "tf": 4.358898943540674 }, "prefect.Flow.serve": { "tf": 4.242640687119285 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 3.872983346207417 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 3.4641016151377544 }, "prefect.Task.__init__": { "tf": 3.4641016151377544 }, "prefect.Task.with_options": { "tf": 4.358898943540674 }, "prefect.Task.create_run": { "tf": 2.6457513110645907 }, "prefect.Task.submit": { "tf": 2.23606797749979 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 19, "c": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } } } } } }, "f": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 1 } }, "df": 3 } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 4, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } }, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.create_run": { "tf": 1 } }, "df": 9 } } } } } } }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.create_run": { "tf": 1 } }, "df": 1 } } } }, "u": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2, "d": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } }, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Flow.to_deployment": { "tf": 
1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } }, "o": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 3 } }, "l": { "docs": {}, "df": 0, "l": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } }, "u": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } }, "b": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 } }, "df": 4 } } } } } } } } } } } } }, "o": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "l": { "docs": { "prefect.flow": { "tf": 2 }, "prefect.Flow.__init__": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 2.449489742783178 }, "prefect.Flow.deploy": { "tf": 2.6457513110645907 }, "prefect.get_client": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.State.is_scheduled": { "tf": 1 }, "prefect.State.is_pending": { "tf": 1 }, "prefect.State.is_running": { "tf": 1 }, "prefect.State.is_completed": { "tf": 1 }, "prefect.State.is_failed": { "tf": 1 }, "prefect.State.is_crashed": { "tf": 1 }, "prefect.State.is_cancelled": { "tf": 1 }, "prefect.State.is_cancelling": { "tf": 1 }, "prefect.State.is_final": { "tf": 1 }, "prefect.State.is_paused": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.__init__": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 27 } } }, "u": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } }, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 7 }, "o": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, 
"prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6, "g": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.get_run_logger": { "tf": 1.4142135623730951 } }, "df": 1 } } }, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } } } } } } } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.__init__": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2 }, "prefect.Task.__init__": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 10, "[": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } } } }, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } } } } }, "x": { "2": { "7": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.__init__": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 2.8284271247461903 }, "prefect.Task.with_options": { "tf": 4.242640687119285 } }, "df": 4 }, "docs": {}, "df": 0 }, "docs": {}, "df": 0 }, "g": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 7 }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1 } } } } } } } } }, "w": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } } } } } } } } } } }, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "k": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 3 } } }, "e": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 
0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1 } } } } } } } }, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } }, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } } } }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } }, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } }, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 3 } } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } } } } } } } } } } } } } }, "a": { "docs": {}, "df": 0, "w": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.4142135623730951 } }, "df": 3 } } } } } } } }, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.with_options": { "tf": 2 }, "prefect.Task.with_options": { "tf": 3 } }, "df": 2 } } } } } } } } }, "y": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1.4142135623730951 }, 
"prefect.Flow.serialize_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.get_client": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.create_run": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.7320508075688772 } }, "df": 11 } }, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2 } }, "df": 3 } } } } } } } } } }, "r": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.visualize": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 4 } } } }, "e": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 2.8284271247461903 }, "prefect.Flow.serve": { "tf": 2.8284271247461903 }, "prefect.Flow.deploy": { "tf": 2.8284271247461903 } }, "df": 3 }, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } }, "n": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } }, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 4, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } } } } } } } } } }, "g": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "x": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.create_run": { "tf": 1 } }, "df": 1 } } } } } } } } } } } }, "x": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 
0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } } } } }, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } } }, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Task.create_run": { "tf": 1 } }, "df": 1 } } } } }, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2 } } } } }, "j": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "b": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.__init__": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } }, "k": { "docs": {}, "df": 0, "w": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.visualize": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.default_name_from_type": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 7 } } } } }, "e": { "docs": {}, "df": 0, "y": { "docs": { "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.__init__": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } } }, "h": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "x": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1 } } } } } } }, "bases": { "root": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.Manifest": { "tf": 1 }, "prefect.State": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 4 } } } } } }, "y": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Manifest": { "tf": 1 } }, "df": 1 } } } } } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 } } } } } } } } }, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, 
"df": 2 } } } } } } } } } } }, "b": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "~": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 } } } } } } } } } } } } }, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Manifest": { "tf": 1 } }, "df": 1 } } } } }, "s": { "docs": { "prefect.State": { "tf": 1 } }, "df": 1 } } } } }, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.State": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 3 } } } } } }, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "~": { "docs": {}, "df": 0, "p": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 }, "r": { "docs": { "prefect.State": { "tf": 1 } }, "df": 1 } } } } } } } } } }, "r": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Manifest": { "tf": 1 }, "prefect.State": { "tf": 1 } }, "df": 2 } } } } } } } }, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Manifest": { "tf": 1 } }, "df": 1 } } } } } }, "s": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State": { "tf": 1 } }, "df": 1 } } } } } } }, "o": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "j": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": { "prefect.State": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } } } } }, "doc": { "root": { "0": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 3 }, "1": { "0": { "docs": { "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 2 }, "1": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "2": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "3": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1.7320508075688772 }, 
"prefect.Task.map": { "tf": 3.3166247903554 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 9 }, "2": { "0": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "1": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "2": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "3": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 }, "docs": { "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 3.3166247903554 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 4 }, "3": { "6": { "0": { "0": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 }, "docs": {}, "df": 0 }, "docs": {}, "df": 0 }, "9": { "docs": { "prefect.deploy": { "tf": 1 } }, "df": 1 }, "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.map": { "tf": 2.8284271247461903 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 6 }, "4": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 3, ":": { "0": { "0": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1 }, "docs": {}, "df": 0 }, "docs": {}, "df": 0 } }, "5": { "0": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 }, "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 4 }, "docs": { "prefect": { "tf": 1.7320508075688772 }, "prefect.allow_failure": { "tf": 3.4641016151377544 }, "prefect.allow_failure.__init__": { "tf": 1.7320508075688772 }, "prefect.flow": { "tf": 7.280109889280518 }, "prefect.Flow": { "tf": 5.744562646538029 }, "prefect.Flow.__init__": { "tf": 1.7320508075688772 }, "prefect.Flow.name": { "tf": 1.7320508075688772 }, "prefect.Flow.flow_run_name": { "tf": 1.7320508075688772 }, "prefect.Flow.task_runner": { "tf": 1.7320508075688772 }, "prefect.Flow.log_prints": { "tf": 1.7320508075688772 }, "prefect.Flow.description": { "tf": 1.7320508075688772 }, "prefect.Flow.fn": { "tf": 1.7320508075688772 }, "prefect.Flow.isasync": { "tf": 1.7320508075688772 }, "prefect.Flow.version": { "tf": 1.7320508075688772 }, "prefect.Flow.timeout_seconds": { "tf": 1.7320508075688772 }, "prefect.Flow.retries": { "tf": 1.7320508075688772 }, "prefect.Flow.retry_delay_seconds": { "tf": 1.7320508075688772 }, "prefect.Flow.parameters": { "tf": 1.7320508075688772 }, "prefect.Flow.should_validate_parameters": { "tf": 1.7320508075688772 }, "prefect.Flow.persist_result": { "tf": 1.7320508075688772 }, "prefect.Flow.result_storage": { "tf": 1.7320508075688772 }, "prefect.Flow.result_serializer": { "tf": 1.7320508075688772 }, "prefect.Flow.cache_result_in_memory": { "tf": 1.7320508075688772 }, "prefect.Flow.on_completion": { "tf": 1.7320508075688772 }, "prefect.Flow.on_failure": { "tf": 1.7320508075688772 }, "prefect.Flow.on_cancellation": { "tf": 1.7320508075688772 }, "prefect.Flow.on_crashed": { "tf": 1.7320508075688772 }, "prefect.Flow.on_running": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 4.795831523312719 }, "prefect.Flow.validate_parameters": { "tf": 
2.6457513110645907 }, "prefect.Flow.serialize_parameters": { "tf": 2.8284271247461903 }, "prefect.Flow.to_deployment": { "tf": 14.2828568570857 }, "prefect.Flow.serve": { "tf": 10.392304845413264 }, "prefect.Flow.from_source": { "tf": 10.535653752852738 }, "prefect.Flow.deploy": { "tf": 12.041594578792296 }, "prefect.Flow.visualize": { "tf": 3.1622776601683795 }, "prefect.get_client": { "tf": 9.219544457292887 }, "prefect.get_run_logger": { "tf": 4.358898943540674 }, "prefect.Manifest": { "tf": 1.7320508075688772 }, "prefect.Manifest.model_config": { "tf": 1.7320508075688772 }, "prefect.Manifest.flow_name": { "tf": 1.7320508075688772 }, "prefect.Manifest.import_path": { "tf": 1.7320508075688772 }, "prefect.Manifest.parameter_openapi_schema": { "tf": 1.7320508075688772 }, "prefect.Manifest.model_fields": { "tf": 1.7320508075688772 }, "prefect.State": { "tf": 1.7320508075688772 }, "prefect.State.type": { "tf": 1.7320508075688772 }, "prefect.State.name": { "tf": 1.7320508075688772 }, "prefect.State.timestamp": { "tf": 1.7320508075688772 }, "prefect.State.message": { "tf": 1.7320508075688772 }, "prefect.State.state_details": { "tf": 1.7320508075688772 }, "prefect.State.data": { "tf": 1.7320508075688772 }, "prefect.State.result": { "tf": 6.782329983125268 }, "prefect.State.to_state_create": { "tf": 3.1622776601683795 }, "prefect.State.default_name_from_type": { "tf": 1.7320508075688772 }, "prefect.State.default_scheduled_start_time": { "tf": 2 }, "prefect.State.is_scheduled": { "tf": 1.7320508075688772 }, "prefect.State.is_pending": { "tf": 1.7320508075688772 }, "prefect.State.is_running": { "tf": 1.7320508075688772 }, "prefect.State.is_completed": { "tf": 1.7320508075688772 }, "prefect.State.is_failed": { "tf": 1.7320508075688772 }, "prefect.State.is_crashed": { "tf": 1.7320508075688772 }, "prefect.State.is_cancelled": { "tf": 1.7320508075688772 }, "prefect.State.is_cancelling": { "tf": 1.7320508075688772 }, "prefect.State.is_final": { "tf": 1.7320508075688772 }, "prefect.State.is_paused": { "tf": 1.7320508075688772 }, "prefect.State.copy": { "tf": 1.7320508075688772 }, "prefect.tags": { "tf": 6.782329983125268 }, "prefect.task": { "tf": 7.280109889280518 }, "prefect.Task": { "tf": 6.164414002968976 }, "prefect.Task.__init__": { "tf": 1.7320508075688772 }, "prefect.Task.description": { "tf": 1.7320508075688772 }, "prefect.Task.fn": { "tf": 1.7320508075688772 }, "prefect.Task.isasync": { "tf": 1.7320508075688772 }, "prefect.Task.task_run_name": { "tf": 1.7320508075688772 }, "prefect.Task.version": { "tf": 1.7320508075688772 }, "prefect.Task.log_prints": { "tf": 1.7320508075688772 }, "prefect.Task.tags": { "tf": 1.7320508075688772 }, "prefect.Task.cache_key_fn": { "tf": 1.7320508075688772 }, "prefect.Task.cache_expiration": { "tf": 1.7320508075688772 }, "prefect.Task.refresh_cache": { "tf": 1.7320508075688772 }, "prefect.Task.retries": { "tf": 1.7320508075688772 }, "prefect.Task.retry_jitter_factor": { "tf": 1.7320508075688772 }, "prefect.Task.persist_result": { "tf": 1.7320508075688772 }, "prefect.Task.result_storage": { "tf": 1.7320508075688772 }, "prefect.Task.result_serializer": { "tf": 1.7320508075688772 }, "prefect.Task.result_storage_key": { "tf": 1.7320508075688772 }, "prefect.Task.cache_result_in_memory": { "tf": 1.7320508075688772 }, "prefect.Task.timeout_seconds": { "tf": 1.7320508075688772 }, "prefect.Task.on_completion": { "tf": 1.7320508075688772 }, "prefect.Task.on_failure": { "tf": 1.7320508075688772 }, "prefect.Task.retry_condition_fn": { "tf": 1.7320508075688772 }, 
"prefect.Task.viz_return_value": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 5.744562646538029 }, "prefect.Task.create_run": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 6.48074069840786 }, "prefect.Task.map": { "tf": 6.48074069840786 }, "prefect.Task.serve": { "tf": 5.385164807134504 }, "prefect.unmapped": { "tf": 2.449489742783178 }, "prefect.unmapped.__init__": { "tf": 1.7320508075688772 }, "prefect.serve": { "tf": 16.15549442140351 }, "prefect.deploy": { "tf": 14.317821063276353 }, "prefect.pause_flow_run": { "tf": 12.328828005937952 }, "prefect.resume_flow_run": { "tf": 2.449489742783178 }, "prefect.suspend_flow_run": { "tf": 3.605551275463989 } }, "df": 104, "w": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 }, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 } }, "df": 4 } } }, "s": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } }, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": { "prefect.allow_failure": { "tf": 1.4142135623730951 }, "prefect.flow": { "tf": 4.358898943540674 }, "prefect.Flow": { "tf": 4 }, "prefect.Flow.to_deployment": { "tf": 2.449489742783178 }, "prefect.Flow.serve": { "tf": 2.6457513110645907 }, "prefect.Flow.deploy": { "tf": 3 }, "prefect.get_run_logger": { "tf": 1.7320508075688772 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 2.8284271247461903 }, "prefect.Task": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 2.23606797749979 }, "prefect.Task.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 3.872983346207417 }, "prefect.suspend_flow_run": { "tf": 3.1622776601683795 } }, "df": 17 } }, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1.7320508075688772 }, "prefect.tags": { "tf": 3.3166247903554 }, "prefect.task": { "tf": 2.6457513110645907 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 2.23606797749979 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.7320508075688772 }, "prefect.Task.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 18, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { 
"tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 7 } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } } } } }, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } }, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "k": { "docs": { "prefect.Flow.to_deployment": { "tf": 2.8284271247461903 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 4.123105625617661 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.deploy": { "tf": 2.6457513110645907 } }, "df": 5, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 } }, "df": 3 } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 1.4142135623730951 } }, "df": 3, "b": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 } }, "df": 1 } } } } }, "o": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.serve": { "tf": 1 } }, "df": 1 } } } } } } } }, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 2.449489742783178 }, "prefect.Task.map": { "tf": 2.449489742783178 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 11 } }, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } }, "r": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } }, "y": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } }, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } }, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 2.23606797749979 }, "prefect.Flow.deploy": { "tf": 2.6457513110645907 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, 
"prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 11 } } } }, "n": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.Flow.to_deployment": { "tf": 2.6457513110645907 }, "prefect.Flow.serve": { "tf": 3 }, "prefect.Flow.deploy": { "tf": 2.6457513110645907 }, "prefect.task": { "tf": 2.6457513110645907 }, "prefect.Task": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 2.23606797749979 }, "prefect.pause_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 11 } }, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 11 } }, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 2 } } } } }, "f": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 5, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.allow_failure": { "tf": 1.4142135623730951 }, "prefect.flow": { "tf": 3.4641016151377544 }, "prefect.Flow": { "tf": 3.4641016151377544 }, "prefect.Flow.with_options": { "tf": 2.6457513110645907 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 3.3166247903554 }, "prefect.Flow.serve": { "tf": 3 }, "prefect.Flow.deploy": { "tf": 3.4641016151377544 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1.7320508075688772 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.task": { "tf": 3.3166247903554 }, "prefect.Task": { "tf": 3.1622776601683795 }, "prefect.Task.with_options": { "tf": 3.605551275463989 }, "prefect.Task.submit": { "tf": 2.449489742783178 }, "prefect.Task.map": { "tf": 3 }, "prefect.Task.serve": { "tf": 1.4142135623730951 }, "prefect.unmapped": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 24, "m": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } } }, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.visualize": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 } }, "df": 2 } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.result": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 2.449489742783178 } }, "df": 3, "s": { "docs": { 
"prefect.allow_failure": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 2.449489742783178 } }, "df": 3 } } } } }, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 3.1622776601683795 }, "prefect.Flow": { "tf": 2.6457513110645907 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 15, "s": { "docs": { "prefect.flow": { "tf": 2.6457513110645907 }, "prefect.task": { "tf": 1 } }, "df": 2 } } } } } } }, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } }, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 3, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.allow_failure": { "tf": 2 }, "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.State.result": { "tf": 2.23606797749979 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 9 } }, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 7 } } }, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } }, "l": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 10 } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "i": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } }, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 
1.7320508075688772 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 3, "y": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } }, "r": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 2.8284271247461903 }, "prefect.Flow": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 3.3166247903554 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 2.449489742783178 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.Task.submit": { "tf": 2 }, "prefect.Task.map": { "tf": 2 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 20 } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 7.937253933193772 }, "prefect.Flow": { "tf": 5.196152422706632 }, "prefect.Flow.with_options": { "tf": 5.656854249492381 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 3.1622776601683795 }, "prefect.Flow.serve": { "tf": 4 }, "prefect.Flow.from_source": { "tf": 4 }, "prefect.Flow.deploy": { "tf": 3.872983346207417 }, "prefect.Flow.visualize": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.Manifest": { "tf": 1 }, "prefect.State.result": { "tf": 4.358898943540674 }, "prefect.tags": { "tf": 3.605551275463989 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 4.69041575982343 }, "prefect.Task.map": { "tf": 4.47213595499958 }, "prefect.serve": { "tf": 2.6457513110645907 }, "prefect.deploy": { "tf": 3.3166247903554 }, "prefect.pause_flow_run": { "tf": 5.656854249492381 }, "prefect.resume_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 4.358898943540674 } }, "df": 23, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 5 }, "v": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } } } } } } } } }, "a": { "docs": {}, "df": 0, "g": { "docs": { "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 1 } } }, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.7320508075688772 }, 
"prefect.Flow.from_source": { "tf": 1 } }, "df": 3 } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.flow": { "tf": 2.23606797749979 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 4, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } } } } } }, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 2 } } } }, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4 } } } } } }, "t": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } }, "n": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 4 } }, "s": { "2": { "docs": { "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.from_source": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2.449489742783178 } }, "df": 3 }, "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 2 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 15, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.6457513110645907 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State": { "tf": 1 }, "prefect.State.result": { "tf": 4.47213595499958 }, "prefect.State.to_state_create": { "tf": 1.7320508075688772 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 2.6457513110645907 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 } }, "df": 14, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1 } }, "df": 4 }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 4 } } } } }, "c": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, 
"df": 0, "e": { "docs": { "prefect.State.to_state_create": { "tf": 1 } }, "df": 1 } } } } } }, "t": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } }, "i": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 1 } } }, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 } }, "df": 2, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } }, "s": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1 }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 4 } } }, "u": { "docs": {}, "df": 0, "p": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1 } } } } }, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 10, "s": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 2 } } } } }, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 7 } } }, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 4 } } }, "p": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.suspend_flow_run": { "tf": 1 } }, "df": 1 } } } } } }, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.with_options": { "tf": 1 } }, "df": 1 } } }, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 2 } } } }, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1 } }, "df": 3, "h": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { 
"docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.get_client": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 4 } } } } } } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 } }, "df": 3 } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 2 } } } }, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4, "r": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 6 } } } } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 2.23606797749979 }, "prefect.Task.serve": { "tf": 1.7320508075688772 }, "prefect.serve": { "tf": 2.23606797749979 } }, "df": 4, "d": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1 }, "r": { "docs": { "prefect.Task.serve": { "tf": 1 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Task.serve": { "tf": 1 } }, "df": 1 } } } } }, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 } }, "df": 1, "s": { "docs": { "prefect.flow": { "tf": 2 }, "prefect.Flow": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 2.6457513110645907 }, "prefect.pause_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 11 } } } }, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.from_source": { "tf": 2 } }, "df": 1 } } } }, "t": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 10, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 5, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } 
} } } }, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 3 } } } } } } } } } } } } } } } } } }, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 }, "t": { "docs": { "prefect.State.to_state_create": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 } } }, "h": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 3 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 3 }, "prefect.Task": { "tf": 3 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.unmapped": { "tf": 1 } }, "df": 14 } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 2 } } } } } } }, "u": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 } }, "df": 2 } } } }, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.submit": { "tf": 3 }, "prefect.Task.map": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 4, "s": { "docs": { "prefect.flow": { "tf": 1 } }, "df": 1 }, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 } }, "df": 3 } } } }, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } } }, "c": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "s": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 } }, "df": 2 }, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } } } }, "n": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1, "d": { "docs": {}, "df": 0, 
"a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1 } } } }, "p": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 2 } } } }, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } } } }, "s": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": { "prefect.suspend_flow_run": { "tf": 2.23606797749979 } }, "df": 1, "s": { "docs": { "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 1 }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.suspend_flow_run": { "tf": 1 } }, "df": 1 } } } } } } } } }, "i": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 } }, "df": 1, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 2 } } } }, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2 } } } } }, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } }, "c": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } }, "y": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 3 } } } } } } } }, "a": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.serve": { "tf": 8.366600265340756 }, "prefect.Flow.from_source": { "tf": 9.591663046625438 }, "prefect.Flow.deploy": { "tf": 8.602325267042627 } }, "df": 3 } }, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.unmapped": { "tf": 1 } }, "df": 1 } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 3 }, "prefect.Flow.serve": { "tf": 3.605551275463989 }, "prefect.Flow.deploy": { "tf": 3.605551275463989 }, "prefect.deploy": { "tf": 1 } }, "df": 4, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 2.449489742783178 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 }, "prefect.serve": { "tf": 1 } }, "df": 4 }, "d": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { 
"prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } } } }, "m": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } }, "o": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1 } } } }, "a": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } } }, "o": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.from_source": { "tf": 2.449489742783178 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } }, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } } } } } } } }, "k": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 2, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } } } }, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } }, "o": { "docs": { "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.from_source": { "tf": 2.6457513110645907 }, "prefect.Flow.deploy": { "tf": 2.6457513110645907 } }, "df": 3, "r": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 2.23606797749979 }, "prefect.Flow.serve": { "tf": 2.8284271247461903 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 3.1622776601683795 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 2.449489742783178 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 16, "g": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } } } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, 
"prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } }, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } } } } } } } }, "f": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.flow": { "tf": 4.242640687119285 }, "prefect.Flow": { "tf": 3.7416573867739413 }, "prefect.Flow.with_options": { "tf": 3 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 4.123105625617661 }, "prefect.Flow.serve": { "tf": 4.242640687119285 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 4.795831523312719 }, "prefect.Manifest": { "tf": 1 }, "prefect.State": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1.4142135623730951 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 4.358898943540674 }, "prefect.Task": { "tf": 4.358898943540674 }, "prefect.Task.with_options": { "tf": 3.4641016151377544 }, "prefect.Task.submit": { "tf": 2 }, "prefect.Task.map": { "tf": 2.6457513110645907 }, "prefect.unmapped": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 }, "prefect.serve": { "tf": 1.7320508075688772 }, "prefect.deploy": { "tf": 2.6457513110645907 }, "prefect.pause_flow_run": { "tf": 2.449489742783178 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 28, "f": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } }, "p": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1 } }, "df": 1, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 4, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.flow": { "tf": 4.123105625617661 }, "prefect.Flow": { "tf": 4 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 4.242640687119285 }, "prefect.Task": { "tf": 4.242640687119285 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 8, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } }, "s": { "docs": { "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 2.23606797749979 } }, "df": 5 } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.unmapped": { "tf": 1 } }, 
"df": 1 } } } } } } } }, "n": { "docs": { "prefect.flow": { "tf": 3 }, "prefect.Flow": { "tf": 3 }, "prefect.Flow.with_options": { "tf": 2.449489742783178 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1.7320508075688772 }, "prefect.deploy": { "tf": 1 } }, "df": 14, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 } }, "df": 8 } }, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } }, "e": { "docs": { "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2 } }, "df": 2 } }, "b": { "docs": {}, "df": 0, "j": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 10, "s": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 4 } } } } }, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.Task.map": { "tf": 2 } }, "df": 2, "p": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } }, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 } }, "df": 3 } } } } } } }, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 4, "w": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { 
"prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } } }, "h": { "docs": { "prefect.State.result": { "tf": 1.7320508075688772 } }, "df": 1 } }, "i": { "docs": { "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 2, "n": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Flow.visualize": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1.7320508075688772 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 2.8284271247461903 }, "prefect.Task": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 2.6457513110645907 }, "prefect.Task.map": { "tf": 2.23606797749979 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 18, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.unmapped": { "tf": 1 } }, "df": 6 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } }, "p": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1.7320508075688772 }, "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.unmapped": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.resume_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 2.23606797749979 } }, "df": 10, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 } }, "df": 4 } } } }, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2, "o": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.copy": { "tf": 1 } }, "df": 6 }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.with_options": { "tf": 1 } }, "df": 1 } } } } } } }, "v": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, 
"prefect.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 5 } } }, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } }, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.unmapped.__init__": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 7 } }, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 } }, "df": 1 } } } } } } }, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.unmapped": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 5 } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } }, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1 } } } } }, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 5 } } } } }, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } } } } } } }, "c": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } }, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.State.default_scheduled_start_time": 
{ "tf": 1 } }, "df": 1 } } } } } } } } } } } } }, "f": { "docs": { "prefect.allow_failure": { "tf": 1.4142135623730951 }, "prefect.flow": { "tf": 3.605551275463989 }, "prefect.Flow": { "tf": 3.3166247903554 }, "prefect.Flow.with_options": { "tf": 2 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 2.449489742783178 }, "prefect.Flow.serve": { "tf": 2.8284271247461903 }, "prefect.Flow.deploy": { "tf": 3 }, "prefect.Flow.visualize": { "tf": 1.7320508075688772 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 3.4641016151377544 }, "prefect.Task": { "tf": 3.3166247903554 }, "prefect.Task.with_options": { "tf": 2.6457513110645907 }, "prefect.Task.submit": { "tf": 2 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1.7320508075688772 }, "prefect.pause_flow_run": { "tf": 2.8284271247461903 }, "prefect.suspend_flow_run": { "tf": 2.449489742783178 } }, "df": 22 }, "t": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 2.23606797749979 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 14, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 5 }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 2.6457513110645907 } }, "df": 3, "s": { "docs": { "prefect.Task.map": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 } } } } } }, "m": { "docs": { "prefect.Task.map": { "tf": 2 } }, "df": 1, "s": { "docs": { "prefect.Task.map": { "tf": 2 } }, "df": 1 } } } }, "s": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 3.3166247903554 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 2.23606797749979 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 2.8284271247461903 }, "prefect.Task": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 2 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.unmapped": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 
22, "n": { "docs": { "prefect.Flow.visualize": { "tf": 1.4142135623730951 } }, "df": 1 } }, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 2.449489742783178 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.7320508075688772 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 14, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } } }, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } } }, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 3.1622776601683795 }, "prefect.deploy": { "tf": 3 } }, "df": 2, ":": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "v": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } } }, "g": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 1, "d": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } } }, "d": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.resume_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 4, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } } } } } } }, "s": { "docs": { "prefect.deploy": { "tf": 1 } }, "df": 1 } }, "p": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } }, "t": { "docs": { "prefect.Flow.visualize": { "tf": 1.7320508075688772 } }, "df": 1, "h": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, 
"prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 3.4641016151377544 }, "prefect.Task": { "tf": 3 }, "prefect.Task.with_options": { "tf": 2.23606797749979 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 2 }, "prefect.unmapped": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 19 }, "n": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } }, "e": { "docs": { "prefect.allow_failure": { "tf": 1.7320508075688772 }, "prefect.flow": { "tf": 8.12403840463596 }, "prefect.Flow": { "tf": 6.708203932499369 }, "prefect.Flow.with_options": { "tf": 4 }, "prefect.Flow.validate_parameters": { "tf": 2.449489742783178 }, "prefect.Flow.to_deployment": { "tf": 5.196152422706632 }, "prefect.Flow.serve": { "tf": 4.795831523312719 }, "prefect.Flow.from_source": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 6.557438524302 }, "prefect.Flow.visualize": { "tf": 1.7320508075688772 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 3 }, "prefect.State": { "tf": 1 }, "prefect.State.result": { "tf": 3.4641016151377544 }, "prefect.State.to_state_create": { "tf": 1.7320508075688772 }, "prefect.State.copy": { "tf": 1.7320508075688772 }, "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 6.48074069840786 }, "prefect.Task": { "tf": 6.782329983125268 }, "prefect.Task.with_options": { "tf": 4.69041575982343 }, "prefect.Task.submit": { "tf": 4.242640687119285 }, "prefect.Task.map": { "tf": 4.47213595499958 }, "prefect.Task.serve": { "tf": 2.6457513110645907 }, "prefect.serve": { "tf": 2 }, "prefect.deploy": { "tf": 4.123105625617661 }, "prefect.pause_flow_run": { "tf": 5.916079783099616 }, "prefect.resume_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 4.795831523312719 } }, "df": 28, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 5 } }, "m": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 }, "y": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "s": { "docs": { "prefect.allow_failure": { "tf": 1.4142135623730951 }, "prefect.flow": { "tf": 3.872983346207417 }, "prefect.Flow": { "tf": 3.4641016151377544 }, "prefect.Flow.with_options": { "tf": 1.7320508075688772 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 3.3166247903554 }, "prefect.Flow.serve": { "tf": 3.1622776601683795 }, "prefect.Flow.deploy": { "tf": 3.3166247903554 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1.7320508075688772 }, "prefect.State.to_state_create": { "tf": 1.7320508075688772 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.task": { "tf": 3.605551275463989 }, "prefect.Task": { "tf": 3.4641016151377544 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.Task.serve": { "tf": 1 }, "prefect.unmapped": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.suspend_flow_run": { "tf": 2 } }, "df": 22 } }, "r": { 
"docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2 } }, "df": 1 } }, "o": { "docs": {}, "df": 0, "w": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1 } } }, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } } } }, "o": { "docs": { "prefect.allow_failure": { "tf": 1.7320508075688772 }, "prefect.flow": { "tf": 5 }, "prefect.Flow": { "tf": 4.69041575982343 }, "prefect.Flow.with_options": { "tf": 3.3166247903554 }, "prefect.Flow.validate_parameters": { "tf": 1.7320508075688772 }, "prefect.Flow.serialize_parameters": { "tf": 2 }, "prefect.Flow.to_deployment": { "tf": 4.58257569495584 }, "prefect.Flow.serve": { "tf": 4.898979485566356 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 6.164414002968976 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 2.449489742783178 }, "prefect.State.result": { "tf": 2.449489742783178 }, "prefect.State.to_state_create": { "tf": 1.7320508075688772 }, "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 5.196152422706632 }, "prefect.Task": { "tf": 5.291502622129181 }, "prefect.Task.with_options": { "tf": 4 }, "prefect.Task.submit": { "tf": 3 }, "prefect.Task.map": { "tf": 3.3166247903554 }, "prefect.Task.serve": { "tf": 1.7320508075688772 }, "prefect.unmapped": { "tf": 1 }, "prefect.serve": { "tf": 2.8284271247461903 }, "prefect.deploy": { "tf": 4 }, "prefect.pause_flow_run": { "tf": 3.7416573867739413 }, "prefect.resume_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 3.1622776601683795 } }, "df": 27, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4 } } } }, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.from_source": { "tf": 1.7320508075688772 } }, "df": 1 } } }, "d": { "docs": {}, "df": 0, "o": { "docs": { "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 1 } }, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } }, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 8 } } } } } }, "r": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } } }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { 
"tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 4, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 8 } } }, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 } }, "df": 5 } } } } }, "z": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } } } } }, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 2.23606797749979 }, "prefect.tags": { "tf": 3 }, "prefect.task": { "tf": 7.280109889280518 }, "prefect.Task": { "tf": 5.830951894845301 }, "prefect.Task.with_options": { "tf": 6.557438524302 }, "prefect.Task.submit": { "tf": 6 }, "prefect.Task.map": { "tf": 5.291502622129181 }, "prefect.Task.serve": { "tf": 3.3166247903554 }, "prefect.pause_flow_run": { "tf": 2.6457513110645907 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 14, "s": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 3.1622776601683795 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 8 } } }, "g": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.tags": { "tf": 4.58257569495584 }, "prefect.task": { "tf": 2.6457513110645907 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 8 } } }, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.State.result": { 
"tf": 1 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 13, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.validate_parameters": { "tf": 1.4142135623730951 }, "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 5 }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } } } } } }, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 2.23606797749979 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 10 } }, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } } } } }, "e": { "docs": {}, "df": 0, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 }, "a": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } } } } } }, "w": { "docs": {}, "df": 0, "o": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 }, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } }, "u": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 3 } } } } } }, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 5 } } }, "e": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 2, "d": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } } }, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 2 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 
"tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 11, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 1 } }, "df": 3 } } } } }, "e": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 4, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 1 } } }, "s": { "docs": { "prefect.task": { "tf": 1 } }, "df": 1 } } } } } } }, "s": { "docs": { "prefect.flow": { "tf": 3.1622776601683795 }, "prefect.Flow": { "tf": 3 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 2.23606797749979 }, "prefect.Flow.serve": { "tf": 2.23606797749979 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 }, "prefect.Flow.visualize": { "tf": 1 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.map": { "tf": 2.449489742783178 }, "prefect.unmapped": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 16, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 3 }, "prefect.Task.map": { "tf": 1 } }, "df": 6, "h": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1 } }, "df": 5 } } } } } } } } } }, "s": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.with_options": { "tf": 1 } }, "df": 1 } } }, "o": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3, "d": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 5 } } } } } } } } }, "f": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 10 } } } }, "p": { "docs": {}, "df": 0, 
"p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 1 } } } } } } } } }, "i": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.State.copy": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 11, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } } } } } } } }, "c": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 2 } }, "df": 1, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2 } }, "df": 3 } } }, "s": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.from_source": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1 } }, "df": 3 } } } }, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 } }, "df": 3 }, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } } } } }, "d": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 4, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, "l": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, 
"prefect.get_run_logger": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 5 } } } } } } } } }, "m": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1, "p": { "docs": { "prefect.Flow.serve": { "tf": 2.449489742783178 }, "prefect.Flow.from_source": { "tf": 3 }, "prefect.Flow.deploy": { "tf": 3.4641016151377544 }, "prefect.deploy": { "tf": 1 } }, "df": 4 }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } } } }, "v": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 } }, "df": 2 } } } } } } }, "o": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } }, "b": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 } }, "df": 2 } } } }, "w": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.get_client": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 } }, "df": 3, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } }, "g": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.State.copy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 3 } } } }, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.serve": { "tf": 1 } }, "df": 1 } } } } } } } } } } } } }, "d": { "docs": { "prefect.tags": { "tf": 2 } }, "df": 1, "o": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2, "w": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": { "prefect.allow_failure": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 2 } } } } } } } }, "c": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } } } } } }, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 2, "f": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } } }, "t": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 }, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 
} } }, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1.4142135623730951 } }, "df": 2, "]": { "docs": {}, "df": 0, "[": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "f": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } } } } } } } } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } } } } } } }, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 2 } } } } } }, "c": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 9 } } } } } } } } }, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 2.23606797749979 } }, "df": 6, "s": { "docs": { "prefect.task": { "tf": 1.7320508075688772 }, "prefect.Task": { "tf": 1.7320508075688772 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 3 } } } }, "f": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.result": { "tf": 2.449489742783178 }, "prefect.tags": { "tf": 2.23606797749979 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 3.1622776601683795 }, "prefect.Task.map": { "tf": 3.3166247903554 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 15, "a": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 2.6457513110645907 }, "prefect.Flow": { "tf": 2.449489742783178 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.State.copy": { "tf": 1 }, "prefect.Task.serve": { "tf": 1.4142135623730951 }, 
"prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 10, "s": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.6457513110645907 }, "prefect.Task": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.deploy": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 12 } } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 7, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 5 }, "s": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1.4142135623730951 } }, "df": 3 } } } }, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 } }, "df": 5 } } } } } }, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4 } } }, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "y": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2 }, "prefect.serve": { "tf": 2 }, "prefect.deploy": { "tf": 2.6457513110645907 } }, "df": 4, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 4.69041575982343 }, "prefect.Flow.serve": { "tf": 4.47213595499958 }, "prefect.Flow.deploy": { "tf": 4.795831523312719 }, "prefect.serve": { "tf": 1.7320508075688772 }, "prefect.deploy": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 7, "s": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.serve": { "tf": 1.7320508075688772 }, "prefect.deploy": { "tf": 2.6457513110645907 } }, "df": 4 }, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 
2 } } } } } } } } }, "s": { "docs": { "prefect.Flow.deploy": { "tf": 1 } }, "df": 1 }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } } }, "v": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } } } } }, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 2 } } } } } }, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 1 } } } } }, "c": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 1, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.resume_flow_run": { "tf": 1 } }, "df": 4 } } } } } } } }, "r": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } } }, "v": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } }, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 } }, "df": 1, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 } }, "df": 1 } } } } } } } } } } } }, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.get_run_logger": { "tf": 1 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.State.to_state_create": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 5, "b": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } } }, "e": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.serve": { "tf": 1.4142135623730951 } }, "df": 2 } } } } } }, "y": { "docs": { "prefect.task": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2, "s": { "docs": { "prefect.task": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } } }, "u": { "docs": {}, 
"df": 0, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.unmapped": { "tf": 1 } }, "df": 2 } } } } }, "y": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "c": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } } }, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "p": { "docs": { "prefect.State.to_state_create": { "tf": 1 } }, "df": 1 } } } }, "y": { "docs": { "prefect.flow": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 5, "o": { "docs": {}, "df": 0, "u": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 3, "r": { "docs": { "prefect.allow_failure": { "tf": 1 } }, "df": 1 } } }, "i": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "s": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1 } } } } } }, "e": { "docs": { "prefect.tags": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 3, "x": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.allow_failure": { "tf": 1 }, "prefect.State.result": { "tf": 1.7320508075688772 } }, "df": 2 } } } } }, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 6 } } } }, "h": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } }, "e": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2.23606797749979 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 11 } }, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } }, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 2.23606797749979 }, "prefect.Flow.serve": { "tf": 2.23606797749979 }, "prefect.Flow.deploy": { "tf": 2.23606797749979 }, "prefect.Task.serve": { "tf": 1 } }, "df": 4, "d": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } }, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, 
"df": 0, "e": { "docs": { "prefect.Flow.visualize": { "tf": 1 } }, "df": 1 } } } } } } } }, "a": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.get_client": { "tf": 1 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 7, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.result": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 15 } } } } } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.tags": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.deploy": { "tf": 1 } }, "df": 5 } } } } }, "t": { "docs": { "prefect.pause_flow_run": { "tf": 1.4142135623730951 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } }, "p": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 3 } } } } }, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } } } }, "a": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": { "prefect.flow": { "tf": 2 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1 } }, "df": 6 } } }, "m": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 2 } } } }, "n": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.from_source": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 2 }, "prefect.Task": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 7 } } } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 } }, "df": 5 } } } }, "g": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "e": { "docs": { 
"prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 } }, "df": 3 } } } }, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.with_options": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 } }, "df": 2 } } } } } }, "c": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } }, "f": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 5 } } } } }, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } }, "v": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } } } } } } } }, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } }, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } } } }, "n": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.from_source": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 6 } } } } }, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.result": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 } }, "df": 2, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.State.result": { "tf": 1 } }, "df": 1 } } } } } } }, "s": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Task.serve": { "tf": 1 } }, "df": 1 } } } } } } } }, "l": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 } }, "df": 1 } } } }, "v": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.allow_failure.__init__": { "tf": 1 }, "prefect.flow": { "tf": 2 }, 
"prefect.Flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.unmapped.__init__": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 11, "s": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 5 }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.State.result": { "tf": 2 }, "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } }, "i": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.validate_parameters": { "tf": 1 } }, "df": 4, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } } } } } } } }, "r": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.with_options": { "tf": 1 } }, "df": 1, "s": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1 }, "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 8 } } } } } } } }, "e": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.flow": { "tf": 2.449489742783178 }, "prefect.Flow": { "tf": 2 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 } }, "df": 8 } } } } } }, "m": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2 }, "i": { "docs": {}, "df": 0, "a": { "docs": { "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 } }, "df": 2 }, "s": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "z": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.visualize": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 4 } } } } } } } }, "z": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } }, "m": { "docs": { "prefect.deploy": { "tf": 1 } }, "df": 1, "a": { "docs": {}, "df": 0, 
"y": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 1 } }, "df": 4 }, "x": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "m": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "m": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 7 } } } } }, "r": { "docs": {}, "df": 0, "k": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4 } } } }, "i": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 5, "t": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "s": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } } } }, "n": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1 }, "r": { "docs": { "prefect.tags": { "tf": 1 } }, "df": 1 } } } }, "y": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } }, "t": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "s": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } } } } } }, "k": { "docs": {}, "df": 0, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2 } }, "p": { "docs": { "prefect.Task.map": { "tf": 2.449489742783178 } }, "df": 1, "p": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.map": { "tf": 2.449489742783178 } }, "df": 1 } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.unmapped": { "tf": 1 } }, "df": 1 } } } } } }, "u": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.get_client": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 2 } }, "df": 4 } }, "l": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "p": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2 } }, "df": 3 } } } } } } }, "e": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 2, "m": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "y": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 } }, "df": 2 } } } }, "s": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.4142135623730951 
} }, "df": 4 } } } } }, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.to_state_create": { "tf": 1 }, "prefect.Task.serve": { "tf": 1 } }, "df": 3 } } }, "a": { "docs": {}, "df": 0, "d": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "a": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } } } } }, "r": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Task.with_options": { "tf": 1 } }, "df": 1 } } } } }, "y": { "docs": { "prefect.flow": { "tf": 1.7320508075688772 }, "prefect.Flow.with_options": { "tf": 2.449489742783178 }, "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 2 }, "prefect.Flow.from_source": { "tf": 2 }, "prefect.Flow.deploy": { "tf": 2.449489742783178 }, "prefect.State.result": { "tf": 3.4641016151377544 }, "prefect.tags": { "tf": 3.1622776601683795 }, "prefect.task": { "tf": 2.23606797749979 }, "prefect.Task.with_options": { "tf": 3.3166247903554 }, "prefect.Task.submit": { "tf": 3.7416573867739413 }, "prefect.Task.map": { "tf": 3.7416573867739413 }, "prefect.Task.serve": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 2 }, "prefect.deploy": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 } }, "df": 16 }, "o": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 2 } }, "d": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.4142135623730951 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } } }, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "s": { "docs": { "prefect.State.copy": { "tf": 1 } }, "df": 1 } } } }, "n": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1 } } } } } } } }, "r": { "docs": {}, "df": 0, "e": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } }, "i": { "docs": { "prefect.Flow.serve": { "tf": 1 } }, "df": 1, "g": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "t": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } }, "h": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": { "prefect.tags": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 1.4142135623730951 } }, "df": 2, "h": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.task": { "tf": 1.4142135623730951 } }, "df": 3 } }, "v": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.validate_parameters": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 2 }, "prefect.suspend_flow_run": { "tf": 1.4142135623730951 } }, "df": 4 } } }, "e": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "o": { "docs": { "prefect.Flow.to_deployment": { "tf": 2 }, "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.get_client": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 2.449489742783178 }, "prefect.task": { "tf": 1 }, 
"prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 2 } }, "df": 8 } } }, "r": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } }, "o": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 3 } }, "w": { "docs": { "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3, "e": { "docs": {}, "df": 0, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 } }, "df": 2 } } } } } }, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "p": { "docs": { "prefect.get_client": { "tf": 1 } }, "df": 1, "s": { "docs": {}, "df": 0, ":": { "docs": {}, "df": 0, "/": { "docs": {}, "df": 0, "/": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "b": { "docs": { "prefect.Flow.from_source": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.State.default_scheduled_start_time": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 4 } } } } } } } } } } } } } }, "l": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 } }, "df": 2 } } }, "l": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 }, "prefect.deploy": { "tf": 1.7320508075688772 } }, "df": 4, "l": { "docs": {}, "df": 0, "y": { "docs": { "prefect.State.to_state_create": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 2 } } } } }, "a": { "docs": {}, "df": 0, "d": { "docs": { "prefect.Flow.from_source": { "tf": 1.7320508075688772 } }, "df": 1, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.Flow": { "tf": 1.4142135623730951 }, "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 4 } }, "s": { "docs": { "prefect.Flow.from_source": { "tf": 1 } }, "df": 1 } } }, "g": { "docs": { "prefect.flow": { "tf": 1.4142135623730951 }, "prefect.get_run_logger": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.deploy": { "tf": 1 } }, "df": 6, "g": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.flow": { "tf": 1 }, "prefect.get_run_logger": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 } }, "df": 4, "s": { "docs": { "prefect.get_run_logger": { "tf": 1 } }, "df": 1 } } }, "i": { "docs": {}, "df": 0, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.flow": { "tf": 1 } }, "df": 1 } } } } }, "n": { "docs": {}, "df": 0, "g": { "docs": { "prefect.task": { "tf": 1.4142135623730951 }, "prefect.Task": { "tf": 1.4142135623730951 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } }, "i": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 2.23606797749979 }, "prefect.Flow.with_options": { "tf": 2.23606797749979 }, "prefect.Flow.to_deployment": { 
"tf": 1.7320508075688772 }, "prefect.Flow.serve": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 }, "prefect.task": { "tf": 2.449489742783178 }, "prefect.Task": { "tf": 2.449489742783178 }, "prefect.Task.with_options": { "tf": 2.449489742783178 }, "prefect.Task.map": { "tf": 1.4142135623730951 }, "prefect.serve": { "tf": 1.4142135623730951 }, "prefect.deploy": { "tf": 1.7320508075688772 } }, "df": 12, "e": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Task.serve": { "tf": 1 } }, "df": 1 } } } }, "k": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 } }, "m": { "docs": {}, "df": 0, "i": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.serve": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 2 } } } }, "t": { "docs": { "prefect.Flow.serve": { "tf": 8.717797887081348 }, "prefect.Flow.from_source": { "tf": 9.899494936611665 }, "prefect.Flow.deploy": { "tf": 8.94427190999916 } }, "df": 3 }, "e": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Task.map": { "tf": 1 } }, "df": 1 } } }, "n": { "docs": {}, "df": 0, "g": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "h": { "docs": { "prefect.Task.map": { "tf": 1.4142135623730951 } }, "df": 1 } } } }, "v": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "l": { "docs": { "prefect.pause_flow_run": { "tf": 1 }, "prefect.suspend_flow_run": { "tf": 1 } }, "df": 2 } } } } }, "x": { "docs": { "prefect.flow": { "tf": 2.23606797749979 }, "prefect.Flow": { "tf": 1 }, "prefect.Flow.with_options": { "tf": 1.4142135623730951 }, "prefect.State.result": { "tf": 1.4142135623730951 }, "prefect.task": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 1.7320508075688772 }, "prefect.Task.submit": { "tf": 1.4142135623730951 }, "prefect.Task.map": { "tf": 2.23606797749979 } }, "df": 8 }, "j": { "docs": {}, "df": 0, "s": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "n": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 }, "prefect.Manifest": { "tf": 1 } }, "df": 2, "a": { "docs": {}, "df": 0, "b": { "docs": {}, "df": 0, "l": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.serialize_parameters": { "tf": 1 } }, "df": 1 } } } } } } }, "o": { "docs": {}, "df": 0, "b": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 } }, "df": 2 } }, "i": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "t": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "r": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3, "e": { "docs": {}, "df": 0, "d": { "docs": { "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 } }, "df": 3 } } } } } } } }, "k": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 2, "i": { "docs": {}, "df": 0, "c": { "docs": {}, "df": 0, "k": { "docs": { "prefect.Flow.to_deployment": { "tf": 1 }, "prefect.Flow.serve": { "tf": 1 }, "prefect.Flow.deploy": { "tf": 1 } }, "df": 3 } } }, "n": { "docs": { "prefect.Flow.serve": { "tf": 1.4142135623730951 }, "prefect.Flow.from_source": { "tf": 2.449489742783178 }, "prefect.Flow.deploy": { "tf": 1.4142135623730951 } }, "df": 3 }, "w": { "docs": {}, "df": 0, "a": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "g": 
{ "docs": {}, "df": 0, "s": { "docs": { "prefect.get_run_logger": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 4 } } } } }, "e": { "docs": {}, "df": 0, "y": { "docs": { "prefect.task": { "tf": 2.8284271247461903 }, "prefect.Task": { "tf": 2.6457513110645907 }, "prefect.Task.with_options": { "tf": 2 }, "prefect.pause_flow_run": { "tf": 1.7320508075688772 }, "prefect.suspend_flow_run": { "tf": 1.7320508075688772 } }, "df": 5, "w": { "docs": {}, "df": 0, "o": { "docs": {}, "df": 0, "r": { "docs": {}, "df": 0, "d": { "docs": { "prefect.get_run_logger": { "tf": 1 }, "prefect.task": { "tf": 1 }, "prefect.Task": { "tf": 1 }, "prefect.Task.with_options": { "tf": 1 }, "prefect.Task.submit": { "tf": 1 }, "prefect.Task.map": { "tf": 1 }, "prefect.serve": { "tf": 1 } }, "df": 7 } } } } } } }, "q": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": {}, "df": 0, "u": { "docs": {}, "df": 0, "e": { "docs": { "prefect.Flow.to_deployment": { "tf": 1.7320508075688772 }, "prefect.Flow.deploy": { "tf": 1.7320508075688772 } }, "df": 2 } } }, "o": { "docs": {}, "df": 0, "t": { "docs": { "prefect.Flow.to_deployment": { "tf": 3.7416573867739413 }, "prefect.Flow.serve": { "tf": 3.4641016151377544 }, "prefect.Flow.from_source": { "tf": 3.4641016151377544 }, "prefect.Flow.deploy": { "tf": 4.69041575982343 }, "prefect.serve": { "tf": 4 }, "prefect.deploy": { "tf": 4 }, "prefect.pause_flow_run": { "tf": 2 } }, "df": 7 } } } } } } }, "pipeline": ["trimmer"], "_isPrebuiltIndex": true }; + + // mirrored in build-search-index.js (part 1) + // Also split on html tags. this is a cheap heuristic, but good enough. + elasticlunr.tokenizer.setSeperator(//[\s\-.;& _'"=,()]+|<[^>]*>/); + + let searchIndex; + if (docs._isPrebuiltIndex) { + console.info("using precompiled search index"); + searchIndex = elasticlunr.Index.load(docs); + } else { + console.time("building search index"); + // mirrored in build-search-index.js (part 2) + searchIndex = elasticlunr(function () { + this.pipeline.remove(elasticlunr.stemmer); + this.pipeline.remove(elasticlunr.stopWordFilter); + this.addField("qualname"); + this.addField("fullname"); + this.addField("annotation"); + this.addField("default_value"); + this.addField("signature"); + this.addField("bases"); + this.addField("doc"); + this.setRef("fullname"); + }); + for (let doc of docs) { + searchIndex.addDoc(doc); + } + console.timeEnd("building search index"); + } + + return (term) => searchIndex.search(term, { + fields: { + qualname: { boost: 4 }, + fullname: { boost: 2 }, + annotation: { boost: 2 }, + default_value: { boost: 2 }, + signature: { boost: 2 }, + bases: { boost: 2 }, + doc: { boost: 1 }, + }, + expand: true + }); + }) (); \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/admin/clear-database.mdx b/docs/3.0rc/api-ref/server/admin/clear-database.mdx new file mode 100644 index 000000000000..a8f4ed9ce4a7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/admin/clear-database.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/admin/database/clear +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/admin/create-database.mdx b/docs/3.0rc/api-ref/server/admin/create-database.mdx new file mode 100644 index 000000000000..8e62f8be63c1 --- /dev/null +++ b/docs/3.0rc/api-ref/server/admin/create-database.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/admin/database/create +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/admin/drop-database.mdx 
b/docs/3.0rc/api-ref/server/admin/drop-database.mdx new file mode 100644 index 000000000000..d3d0f2f96618 --- /dev/null +++ b/docs/3.0rc/api-ref/server/admin/drop-database.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/admin/database/drop +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/admin/read-settings.mdx b/docs/3.0rc/api-ref/server/admin/read-settings.mdx new file mode 100644 index 000000000000..f35492ba4af3 --- /dev/null +++ b/docs/3.0rc/api-ref/server/admin/read-settings.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/admin/settings +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/admin/read-version.mdx b/docs/3.0rc/api-ref/server/admin/read-version.mdx new file mode 100644 index 000000000000..d092efba5ae5 --- /dev/null +++ b/docs/3.0rc/api-ref/server/admin/read-version.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/admin/version +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/count-artifacts.mdx b/docs/3.0rc/api-ref/server/artifacts/count-artifacts.mdx new file mode 100644 index 000000000000..b7bff656ddbe --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/count-artifacts.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/artifacts/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/count-latest-artifacts.mdx b/docs/3.0rc/api-ref/server/artifacts/count-latest-artifacts.mdx new file mode 100644 index 000000000000..ea25802720e7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/count-latest-artifacts.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/artifacts/latest/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/create-artifact.mdx b/docs/3.0rc/api-ref/server/artifacts/create-artifact.mdx new file mode 100644 index 000000000000..c3a84f0fa26b --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/create-artifact.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/artifacts/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/delete-artifact.mdx b/docs/3.0rc/api-ref/server/artifacts/delete-artifact.mdx new file mode 100644 index 000000000000..05121a4cff21 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/delete-artifact.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/artifacts/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/read-artifact.mdx b/docs/3.0rc/api-ref/server/artifacts/read-artifact.mdx new file mode 100644 index 000000000000..22d006d364f9 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/read-artifact.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/artifacts/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/read-artifacts.mdx b/docs/3.0rc/api-ref/server/artifacts/read-artifacts.mdx new file mode 100644 index 000000000000..3389af22e395 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/read-artifacts.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/artifacts/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/read-latest-artifact.mdx b/docs/3.0rc/api-ref/server/artifacts/read-latest-artifact.mdx new file mode 100644 index 000000000000..bf6e2a56259e --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/read-latest-artifact.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/artifacts/{key}/latest +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/read-latest-artifacts.mdx b/docs/3.0rc/api-ref/server/artifacts/read-latest-artifacts.mdx new file mode 100644 index 
000000000000..96caf4dc64f6 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/read-latest-artifacts.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/artifacts/latest/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/artifacts/update-artifact.mdx b/docs/3.0rc/api-ref/server/artifacts/update-artifact.mdx new file mode 100644 index 000000000000..14606dbf57a2 --- /dev/null +++ b/docs/3.0rc/api-ref/server/artifacts/update-artifact.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/artifacts/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/count-automations.mdx b/docs/3.0rc/api-ref/server/automations/count-automations.mdx new file mode 100644 index 000000000000..2eeb47e79983 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/count-automations.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/automations/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/create-automation.mdx b/docs/3.0rc/api-ref/server/automations/create-automation.mdx new file mode 100644 index 000000000000..34d6f46b9170 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/create-automation.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/automations/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/delete-automation.mdx b/docs/3.0rc/api-ref/server/automations/delete-automation.mdx new file mode 100644 index 000000000000..3cebf930ead7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/delete-automation.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/automations/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/delete-automations-owned-by-resource.mdx b/docs/3.0rc/api-ref/server/automations/delete-automations-owned-by-resource.mdx new file mode 100644 index 000000000000..1e706d85a9c2 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/delete-automations-owned-by-resource.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/automations/owned-by/{resource_id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/patch-automation.mdx b/docs/3.0rc/api-ref/server/automations/patch-automation.mdx new file mode 100644 index 000000000000..070e23282a17 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/patch-automation.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/automations/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/read-automation.mdx b/docs/3.0rc/api-ref/server/automations/read-automation.mdx new file mode 100644 index 000000000000..2c0859a0fa6b --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/read-automation.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/automations/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/read-automations-related-to-resource.mdx b/docs/3.0rc/api-ref/server/automations/read-automations-related-to-resource.mdx new file mode 100644 index 000000000000..6652469ddd34 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/read-automations-related-to-resource.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/automations/related-to/{resource_id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/read-automations.mdx b/docs/3.0rc/api-ref/server/automations/read-automations.mdx new file mode 100644 index 000000000000..c979a837fdb4 --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/read-automations.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/automations/filter +--- \ No 
newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/update-automation.mdx b/docs/3.0rc/api-ref/server/automations/update-automation.mdx new file mode 100644 index 000000000000..eed58b161a5f --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/update-automation.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /api/automations/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/automations/validate-template.mdx b/docs/3.0rc/api-ref/server/automations/validate-template.mdx new file mode 100644 index 000000000000..425a45d6cfed --- /dev/null +++ b/docs/3.0rc/api-ref/server/automations/validate-template.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/templates/validate +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-capabilities/read-available-block-capabilities.mdx b/docs/3.0rc/api-ref/server/block-capabilities/read-available-block-capabilities.mdx new file mode 100644 index 000000000000..8c02699dcdb9 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-capabilities/read-available-block-capabilities.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_capabilities/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/count-block-documents.mdx b/docs/3.0rc/api-ref/server/block-documents/count-block-documents.mdx new file mode 100644 index 000000000000..92350240e6f8 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/count-block-documents.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_documents/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/create-block-document.mdx b/docs/3.0rc/api-ref/server/block-documents/create-block-document.mdx new file mode 100644 index 000000000000..e4c4b0ad6b54 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/create-block-document.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_documents/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/delete-block-document.mdx b/docs/3.0rc/api-ref/server/block-documents/delete-block-document.mdx new file mode 100644 index 000000000000..87a10e357bdf --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/delete-block-document.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/block_documents/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/read-block-document-by-id.mdx b/docs/3.0rc/api-ref/server/block-documents/read-block-document-by-id.mdx new file mode 100644 index 000000000000..d44253d78f57 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/read-block-document-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_documents/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/read-block-documents.mdx b/docs/3.0rc/api-ref/server/block-documents/read-block-documents.mdx new file mode 100644 index 000000000000..05f63878eae3 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/read-block-documents.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_documents/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-documents/update-block-document-data.mdx b/docs/3.0rc/api-ref/server/block-documents/update-block-document-data.mdx new file mode 100644 index 000000000000..9786f5ebf32b --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-documents/update-block-document-data.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/block_documents/{id} +--- \ No newline at end of file diff --git 
a/docs/3.0rc/api-ref/server/block-schemas/create-block-schema.mdx b/docs/3.0rc/api-ref/server/block-schemas/create-block-schema.mdx new file mode 100644 index 000000000000..3d115dc7bc00 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-schemas/create-block-schema.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_schemas/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-schemas/delete-block-schema.mdx b/docs/3.0rc/api-ref/server/block-schemas/delete-block-schema.mdx new file mode 100644 index 000000000000..7c5d8ba862d1 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-schemas/delete-block-schema.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/block_schemas/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-checksum.mdx b/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-checksum.mdx new file mode 100644 index 000000000000..0043d5b507d3 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-checksum.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_schemas/checksum/{checksum} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-id.mdx b/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-id.mdx new file mode 100644 index 000000000000..c6b9b0a3bbf0 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-schemas/read-block-schema-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_schemas/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-schemas/read-block-schemas.mdx b/docs/3.0rc/api-ref/server/block-schemas/read-block-schemas.mdx new file mode 100644 index 000000000000..9c997bf5b66b --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-schemas/read-block-schemas.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_schemas/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/create-block-type.mdx b/docs/3.0rc/api-ref/server/block-types/create-block-type.mdx new file mode 100644 index 000000000000..6ac4d62f41c5 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/create-block-type.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_types/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/delete-block-type.mdx b/docs/3.0rc/api-ref/server/block-types/delete-block-type.mdx new file mode 100644 index 000000000000..d3ff2b872d6c --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/delete-block-type.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/block_types/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/install-system-block-types.mdx b/docs/3.0rc/api-ref/server/block-types/install-system-block-types.mdx new file mode 100644 index 000000000000..720a3ad1d5c4 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/install-system-block-types.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_types/install_system_block_types +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/read-block-document-by-name-for-block-type.mdx b/docs/3.0rc/api-ref/server/block-types/read-block-document-by-name-for-block-type.mdx new file mode 100644 index 000000000000..9b6ec3fc3834 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/read-block-document-by-name-for-block-type.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_types/slug/{slug}/block_documents/name/{block_document_name} +--- \ No newline at end of file diff --git 
a/docs/3.0rc/api-ref/server/block-types/read-block-documents-for-block-type.mdx b/docs/3.0rc/api-ref/server/block-types/read-block-documents-for-block-type.mdx new file mode 100644 index 000000000000..579be8508530 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/read-block-documents-for-block-type.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_types/slug/{slug}/block_documents +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/read-block-type-by-id.mdx b/docs/3.0rc/api-ref/server/block-types/read-block-type-by-id.mdx new file mode 100644 index 000000000000..5ec5d0f695aa --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/read-block-type-by-id.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_types/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/read-block-type-by-slug.mdx b/docs/3.0rc/api-ref/server/block-types/read-block-type-by-slug.mdx new file mode 100644 index 000000000000..bc0bf1649b5b --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/read-block-type-by-slug.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/block_types/slug/{slug} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/read-block-types.mdx b/docs/3.0rc/api-ref/server/block-types/read-block-types.mdx new file mode 100644 index 000000000000..05f1c40900b0 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/read-block-types.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/block_types/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/block-types/update-block-type.mdx b/docs/3.0rc/api-ref/server/block-types/update-block-type.mdx new file mode 100644 index 000000000000..39c4900468e7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/block-types/update-block-type.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/block_types/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/collections/read-view-content.mdx b/docs/3.0rc/api-ref/server/collections/read-view-content.mdx new file mode 100644 index 000000000000..506f1bf4f102 --- /dev/null +++ b/docs/3.0rc/api-ref/server/collections/read-view-content.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/collections/views/{view} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx new file mode 100644 index 000000000000..646009675156 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-decrement-active-slots.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/v2/concurrency_limits/decrement +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-increment-active-slots.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-increment-active-slots.mdx new file mode 100644 index 000000000000..dc6a3edec6e9 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/bulk-increment-active-slots.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/v2/concurrency_limits/increment +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx new file mode 100644 index 000000000000..f49bc24ba289 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/create-concurrency-limit-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/v2/concurrency_limits/ +--- \ No newline at end 
of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx new file mode 100644 index 000000000000..78f9dded5b18 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/delete-concurrency-limit-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/v2/concurrency_limits/{id_or_name} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx new file mode 100644 index 000000000000..d92d60c9307e --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-all-concurrency-limits-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/v2/concurrency_limits/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx new file mode 100644 index 000000000000..a926f63caa82 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/read-concurrency-limit-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/v2/concurrency_limits/{id_or_name} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx b/docs/3.0rc/api-ref/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx new file mode 100644 index 000000000000..5cae4d4a3248 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits-v2/update-concurrency-limit-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/v2/concurrency_limits/{id_or_name} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/create-concurrency-limit.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/create-concurrency-limit.mdx new file mode 100644 index 000000000000..dad852324538 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/create-concurrency-limit.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/concurrency_limits/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx new file mode 100644 index 000000000000..0c69b7d0ba2a --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit-by-tag.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/concurrency_limits/tag/{tag} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit.mdx new file mode 100644 index 000000000000..22e2699fe572 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/delete-concurrency-limit.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/concurrency_limits/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit-by-tag.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit-by-tag.mdx new file mode 100644 index 000000000000..6f8945b1d252 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit-by-tag.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/concurrency_limits/tag/{tag} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit.mdx 
b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit.mdx new file mode 100644 index 000000000000..745b001eac0c --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limit.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/concurrency_limits/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limits.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limits.mdx new file mode 100644 index 000000000000..3ff65587b93e --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/read-concurrency-limits.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/concurrency_limits/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx b/docs/3.0rc/api-ref/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx new file mode 100644 index 000000000000..36940ec9b4e7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/concurrency-limits/reset-concurrency-limit-by-tag.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/concurrency_limits/tag/{tag}/reset +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/create-csrf-token.mdx b/docs/3.0rc/api-ref/server/create-csrf-token.mdx new file mode 100644 index 000000000000..9c3e6c5959b9 --- /dev/null +++ b/docs/3.0rc/api-ref/server/create-csrf-token.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/csrf-token +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/count-deployments.mdx b/docs/3.0rc/api-ref/server/deployments/count-deployments.mdx new file mode 100644 index 000000000000..1834b512f01b --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/count-deployments.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/create-deployment-schedules.mdx b/docs/3.0rc/api-ref/server/deployments/create-deployment-schedules.mdx new file mode 100644 index 000000000000..1a6e9d089cfa --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/create-deployment-schedules.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/schedules +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/create-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/create-deployment.mdx new file mode 100644 index 000000000000..7f603e8519b0 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/create-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/create-flow-run-from-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/create-flow-run-from-deployment.mdx new file mode 100644 index 000000000000..25804a3a14d3 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/create-flow-run-from-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/create_flow_run +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/delete-deployment-schedule.mdx b/docs/3.0rc/api-ref/server/deployments/delete-deployment-schedule.mdx new file mode 100644 index 000000000000..ee9c87ae7344 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/delete-deployment-schedule.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/deployments/{id}/schedules/{schedule_id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/delete-deployment.mdx 
b/docs/3.0rc/api-ref/server/deployments/delete-deployment.mdx new file mode 100644 index 000000000000..971eac34c575 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/delete-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/deployments/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/get-scheduled-flow-runs-for-deployments.mdx b/docs/3.0rc/api-ref/server/deployments/get-scheduled-flow-runs-for-deployments.mdx new file mode 100644 index 000000000000..e347040b0728 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/get-scheduled-flow-runs-for-deployments.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/get_scheduled_flow_runs +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/pause-deployment-1.mdx b/docs/3.0rc/api-ref/server/deployments/pause-deployment-1.mdx new file mode 100644 index 000000000000..a50111e8c41c --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/pause-deployment-1.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/set_schedule_inactive +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/pause-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/pause-deployment.mdx new file mode 100644 index 000000000000..fb44cb3f10e7 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/pause-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/pause_deployment +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/read-deployment-by-name.mdx b/docs/3.0rc/api-ref/server/deployments/read-deployment-by-name.mdx new file mode 100644 index 000000000000..85c31963d772 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/read-deployment-by-name.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/deployments/name/{flow_name}/{deployment_name} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/read-deployment-schedules.mdx b/docs/3.0rc/api-ref/server/deployments/read-deployment-schedules.mdx new file mode 100644 index 000000000000..90142305e00b --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/read-deployment-schedules.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/deployments/{id}/schedules +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/read-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/read-deployment.mdx new file mode 100644 index 000000000000..745d68f25b93 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/read-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/deployments/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/read-deployments.mdx b/docs/3.0rc/api-ref/server/deployments/read-deployments.mdx new file mode 100644 index 000000000000..2f8f6680f965 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/read-deployments.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/resume-deployment-1.mdx b/docs/3.0rc/api-ref/server/deployments/resume-deployment-1.mdx new file mode 100644 index 000000000000..600b64b663bb --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/resume-deployment-1.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/set_schedule_active +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/resume-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/resume-deployment.mdx new file mode 100644 index 
000000000000..cde8fe2b3c1c --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/resume-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/resume_deployment +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/schedule-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/schedule-deployment.mdx new file mode 100644 index 000000000000..33d8ec6df58b --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/schedule-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/deployments/{id}/schedule +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/update-deployment-schedule.mdx b/docs/3.0rc/api-ref/server/deployments/update-deployment-schedule.mdx new file mode 100644 index 000000000000..be82fcf37d88 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/update-deployment-schedule.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/deployments/{id}/schedules/{schedule_id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/update-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/update-deployment.mdx new file mode 100644 index 000000000000..68661ebcdca1 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/update-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/deployments/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/deployments/work-queue-check-for-deployment.mdx b/docs/3.0rc/api-ref/server/deployments/work-queue-check-for-deployment.mdx new file mode 100644 index 000000000000..a9d5a2dabf30 --- /dev/null +++ b/docs/3.0rc/api-ref/server/deployments/work-queue-check-for-deployment.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/deployments/{id}/work_queue_check +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/events/count-account-events.mdx b/docs/3.0rc/api-ref/server/events/count-account-events.mdx new file mode 100644 index 000000000000..665ee81fd642 --- /dev/null +++ b/docs/3.0rc/api-ref/server/events/count-account-events.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/events/count-by/{countable} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/events/create-events.mdx b/docs/3.0rc/api-ref/server/events/create-events.mdx new file mode 100644 index 000000000000..a6a48bbbcbac --- /dev/null +++ b/docs/3.0rc/api-ref/server/events/create-events.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/events +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/events/read-account-events-page.mdx b/docs/3.0rc/api-ref/server/events/read-account-events-page.mdx new file mode 100644 index 000000000000..5a4092381d80 --- /dev/null +++ b/docs/3.0rc/api-ref/server/events/read-account-events-page.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/events/filter/next +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/events/read-events.mdx b/docs/3.0rc/api-ref/server/events/read-events.mdx new file mode 100644 index 000000000000..0afd7688304d --- /dev/null +++ b/docs/3.0rc/api-ref/server/events/read-events.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/events/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx b/docs/3.0rc/api-ref/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx new file mode 100644 index 000000000000..53a58c411023 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-notification-policies/create-flow-run-notification-policy.mdx @@ -0,0 +1,3 
@@ +--- +openapi: post /api/flow_run_notification_policies/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx b/docs/3.0rc/api-ref/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx new file mode 100644 index 000000000000..1f968aee3bbd --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-notification-policies/delete-flow-run-notification-policy.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/flow_run_notification_policies/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx b/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx new file mode 100644 index 000000000000..01c0e391f2ca --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policies.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_run_notification_policies/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx b/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx new file mode 100644 index 000000000000..1baaf8acc01b --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-notification-policies/read-flow-run-notification-policy.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_run_notification_policies/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx b/docs/3.0rc/api-ref/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx new file mode 100644 index 000000000000..ca2f16243035 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-notification-policies/update-flow-run-notification-policy.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/flow_run_notification_policies/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-state.mdx b/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-state.mdx new file mode 100644 index 000000000000..e9fbc9c7ea0f --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-state.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_run_states/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-states.mdx b/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-states.mdx new file mode 100644 index 000000000000..ff16fe5e0bfc --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-run-states/read-flow-run-states.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_run_states/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/average-flow-run-lateness.mdx b/docs/3.0rc/api-ref/server/flow-runs/average-flow-run-lateness.mdx new file mode 100644 index 000000000000..ac44229ed87b --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/average-flow-run-lateness.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/lateness +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/count-flow-runs.mdx b/docs/3.0rc/api-ref/server/flow-runs/count-flow-runs.mdx new file mode 100644 index 000000000000..93f899afff64 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/count-flow-runs.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/count +--- \ No newline at end of file diff --git 
a/docs/3.0rc/api-ref/server/flow-runs/create-flow-run-input.mdx b/docs/3.0rc/api-ref/server/flow-runs/create-flow-run-input.mdx new file mode 100644 index 000000000000..de6c7b80d827 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/create-flow-run-input.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/{id}/input +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/create-flow-run.mdx b/docs/3.0rc/api-ref/server/flow-runs/create-flow-run.mdx new file mode 100644 index 000000000000..54e59109ee94 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/create-flow-run.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run-input.mdx b/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run-input.mdx new file mode 100644 index 000000000000..20515d3c813d --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run-input.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/flow_runs/{id}/input/{key} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run.mdx b/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run.mdx new file mode 100644 index 000000000000..3b450147d26a --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/delete-flow-run.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/flow_runs/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/filter-flow-run-input.mdx b/docs/3.0rc/api-ref/server/flow-runs/filter-flow-run-input.mdx new file mode 100644 index 000000000000..9e4289bb9b97 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/filter-flow-run-input.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/{id}/input/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/flow-run-history.mdx b/docs/3.0rc/api-ref/server/flow-runs/flow-run-history.mdx new file mode 100644 index 000000000000..0f64652db567 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/flow-run-history.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/history +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v1.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v1.mdx new file mode 100644 index 000000000000..332102581531 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v1.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_runs/{id}/graph +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v2.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v2.mdx new file mode 100644 index 000000000000..0fbb58ad3056 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-graph-v2.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_runs/{id}/graph-v2 +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-history.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-history.mdx new file mode 100644 index 000000000000..0151b673eb6d --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-history.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/ui/flow_runs/history +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-input.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-input.mdx new file mode 100644 index 000000000000..1ed8e9721b87 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run-input.mdx @@ -0,0 +1,3 
@@ +--- +openapi: get /api/flow_runs/{id}/input/{key} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-run.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run.mdx new file mode 100644 index 000000000000..cf2e3ef857db --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-run.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flow_runs/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/read-flow-runs.mdx b/docs/3.0rc/api-ref/server/flow-runs/read-flow-runs.mdx new file mode 100644 index 000000000000..3903b1d92243 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/read-flow-runs.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/resume-flow-run.mdx b/docs/3.0rc/api-ref/server/flow-runs/resume-flow-run.mdx new file mode 100644 index 000000000000..0ea50add768a --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/resume-flow-run.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/{id}/resume +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/set-flow-run-state.mdx b/docs/3.0rc/api-ref/server/flow-runs/set-flow-run-state.mdx new file mode 100644 index 000000000000..34d10057ec90 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/set-flow-run-state.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flow_runs/{id}/set_state +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flow-runs/update-flow-run.mdx b/docs/3.0rc/api-ref/server/flow-runs/update-flow-run.mdx new file mode 100644 index 000000000000..23b4fb25bdc3 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flow-runs/update-flow-run.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/flow_runs/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/count-deployments-by-flow.mdx b/docs/3.0rc/api-ref/server/flows/count-deployments-by-flow.mdx new file mode 100644 index 000000000000..26ab9ca27a58 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/count-deployments-by-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/ui/flows/count-deployments +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/count-flows.mdx b/docs/3.0rc/api-ref/server/flows/count-flows.mdx new file mode 100644 index 000000000000..dc87a0734f61 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/count-flows.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flows/count +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/create-flow.mdx b/docs/3.0rc/api-ref/server/flows/create-flow.mdx new file mode 100644 index 000000000000..a18117a321a4 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/create-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flows/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/delete-flow.mdx b/docs/3.0rc/api-ref/server/flows/delete-flow.mdx new file mode 100644 index 000000000000..8170303b93d1 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/delete-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/flows/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/next-runs-by-flow.mdx b/docs/3.0rc/api-ref/server/flows/next-runs-by-flow.mdx new file mode 100644 index 000000000000..550c10c6cd15 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/next-runs-by-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/ui/flows/next-runs +--- \ No newline at end of file diff --git 
a/docs/3.0rc/api-ref/server/flows/read-flow-by-name.mdx b/docs/3.0rc/api-ref/server/flows/read-flow-by-name.mdx new file mode 100644 index 000000000000..0d6cbfd5ca2b --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/read-flow-by-name.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flows/name/{name} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/read-flow.mdx b/docs/3.0rc/api-ref/server/flows/read-flow.mdx new file mode 100644 index 000000000000..ff94133321a8 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/read-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/flows/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/read-flows.mdx b/docs/3.0rc/api-ref/server/flows/read-flows.mdx new file mode 100644 index 000000000000..b521ec92ab26 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/read-flows.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/flows/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/flows/update-flow.mdx b/docs/3.0rc/api-ref/server/flows/update-flow.mdx new file mode 100644 index 000000000000..d54daf977696 --- /dev/null +++ b/docs/3.0rc/api-ref/server/flows/update-flow.mdx @@ -0,0 +1,3 @@ +--- +openapi: patch /api/flows/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/index.mdx b/docs/3.0rc/api-ref/server/index.mdx new file mode 100644 index 000000000000..c01abfd8373d --- /dev/null +++ b/docs/3.0rc/api-ref/server/index.mdx @@ -0,0 +1,12 @@ +--- +description: The Prefect server REST API enables you to interact programmatically with workflow metadata and orchestration logic. +tags: + - API + - Server API +--- + +# Server API + +The Prefect server API is used by the server to work with workflow metadata and enforce orchestration logic. This API is primarily used by Prefect developers. + +Select links in the left navigation menu to explore.
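For orientation, the endpoints these stub pages document can be exercised directly over HTTP. The following sketch is illustrative only and is not part of the patch: it assumes a Prefect server running locally at the default address (as started by `prefect server start`) and uses the GET /api/health and POST /api/flows/ routes defined in the schema.json added below, where flow creation returns the existing flow if one with the same name already exists.

import httpx  # HTTP client; the exact base URL below is an assumption

# Assumption: a local Prefect server listening on the default port 4200.
BASE_URL = "http://127.0.0.1:4200/api"

with httpx.Client(base_url=BASE_URL) as client:
    # GET /api/health returns a 200 with a trivial JSON body when the server is up.
    health = client.get("/health")
    health.raise_for_status()

    # POST /api/flows/ creates a flow from a FlowCreate body; if a flow with the
    # same name already exists, the existing flow is returned (per the schema).
    response = client.post("/flows/", json={"name": "example-flow"})
    response.raise_for_status()
    print(response.json()["id"])
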
diff --git a/docs/3.0rc/api-ref/server/logs/create-logs.mdx b/docs/3.0rc/api-ref/server/logs/create-logs.mdx new file mode 100644 index 000000000000..db47b28c279c --- /dev/null +++ b/docs/3.0rc/api-ref/server/logs/create-logs.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/logs/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/logs/read-logs.mdx b/docs/3.0rc/api-ref/server/logs/read-logs.mdx new file mode 100644 index 000000000000..3780b00a82a1 --- /dev/null +++ b/docs/3.0rc/api-ref/server/logs/read-logs.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/logs/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/root/health-check.mdx b/docs/3.0rc/api-ref/server/root/health-check.mdx new file mode 100644 index 000000000000..43e14c59d09f --- /dev/null +++ b/docs/3.0rc/api-ref/server/root/health-check.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/health +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/root/hello.mdx b/docs/3.0rc/api-ref/server/root/hello.mdx new file mode 100644 index 000000000000..ba8c0f84169a --- /dev/null +++ b/docs/3.0rc/api-ref/server/root/hello.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/hello +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/root/orion-info.mdx b/docs/3.0rc/api-ref/server/root/orion-info.mdx new file mode 100644 index 000000000000..ff423343fa87 --- /dev/null +++ b/docs/3.0rc/api-ref/server/root/orion-info.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/version +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/root/perform-readiness-check.mdx b/docs/3.0rc/api-ref/server/root/perform-readiness-check.mdx new file mode 100644 index 000000000000..60d7e92dc0ae --- /dev/null +++ b/docs/3.0rc/api-ref/server/root/perform-readiness-check.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/ready +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/savedsearches/create-saved-search.mdx b/docs/3.0rc/api-ref/server/savedsearches/create-saved-search.mdx new file mode 100644 index 000000000000..477d096b69ad --- /dev/null +++ b/docs/3.0rc/api-ref/server/savedsearches/create-saved-search.mdx @@ -0,0 +1,3 @@ +--- +openapi: put /api/saved_searches/ +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/savedsearches/delete-saved-search.mdx b/docs/3.0rc/api-ref/server/savedsearches/delete-saved-search.mdx new file mode 100644 index 000000000000..8d21c38bbb60 --- /dev/null +++ b/docs/3.0rc/api-ref/server/savedsearches/delete-saved-search.mdx @@ -0,0 +1,3 @@ +--- +openapi: delete /api/saved_searches/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/savedsearches/read-saved-search.mdx b/docs/3.0rc/api-ref/server/savedsearches/read-saved-search.mdx new file mode 100644 index 000000000000..b55bc8bdfba5 --- /dev/null +++ b/docs/3.0rc/api-ref/server/savedsearches/read-saved-search.mdx @@ -0,0 +1,3 @@ +--- +openapi: get /api/saved_searches/{id} +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/savedsearches/read-saved-searches.mdx b/docs/3.0rc/api-ref/server/savedsearches/read-saved-searches.mdx new file mode 100644 index 000000000000..fb6f2d75cf82 --- /dev/null +++ b/docs/3.0rc/api-ref/server/savedsearches/read-saved-searches.mdx @@ -0,0 +1,3 @@ +--- +openapi: post /api/saved_searches/filter +--- \ No newline at end of file diff --git a/docs/3.0rc/api-ref/server/schema.json b/docs/3.0rc/api-ref/server/schema.json new file mode 100644 index 000000000000..bf82bc841d25 --- /dev/null +++ 
b/docs/3.0rc/api-ref/server/schema.json @@ -0,0 +1,25269 @@ +{ + "openapi": "3.1.0", + "info": { + "title": "Server API Docs", + "version": "3.0rc" + }, + "paths": { + "/api/health": { + "get": { + "tags": [ + "Root" + ], + "summary": "Health Check", + "operationId": "health_check_health_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + } + } + }, + "/api/version": { + "get": { + "tags": [ + "Root" + ], + "summary": "Orion Info", + "operationId": "orion_info_version_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + } + } + }, + "/api/flows/": { + "post": { + "tags": [ + "Flows" + ], + "summary": "Create Flow", + "description": "Gracefully creates a new flow from the provided schema. If a flow with the\nsame name already exists, the existing flow is returned.", + "operationId": "create_flow_flows__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flow" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flows/{id}": { + "patch": { + "tags": [ + "Flows" + ], + "summary": "Update Flow", + "description": "Updates a flow.", + "operationId": "update_flow_flows__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow id", + "title": "Id" + }, + "description": "The flow id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Flows" + ], + "summary": "Read Flow", + "description": "Get a flow by id.", + "operationId": "read_flow_flows__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow id", + "title": "Id" + }, + "description": "The flow id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flow" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + 
"Flows" + ], + "summary": "Delete Flow", + "description": "Delete a flow by id.", + "operationId": "delete_flow_flows__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow id", + "title": "Id" + }, + "description": "The flow id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flows/count": { + "post": { + "tags": [ + "Flows" + ], + "summary": "Count Flows", + "description": "Count flows.", + "operationId": "count_flows_flows_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_flows_flows_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Flows Flows Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flows/name/{name}": { + "get": { + "tags": [ + "Flows" + ], + "summary": "Read Flow By Name", + "description": "Get a flow by name.", + "operationId": "read_flow_by_name_flows_name__name__get", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The name of the flow", + "title": "Name" + }, + "description": "The name of the flow" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Flow" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flows/filter": { + "post": { + "tags": [ + "Flows" + ], + "summary": "Read Flows", + "description": "Query for flows.", + "operationId": "read_flows_flows_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_flows_flows_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Flow" + }, + "title": "Response Read Flows Flows Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { 
+ "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Create Flow Run", + "description": "Create a flow run. If a flow run with the same flow_id and\nidempotency key already exists, the existing flow run will be returned.\n\nIf no state is provided, the flow run will be created in a PENDING state.", + "operationId": "create_flow_run_flow_runs__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}": { + "patch": { + "tags": [ + "Flow Runs" + ], + "summary": "Update Flow Run", + "description": "Updates a flow run.", + "operationId": "update_flow_run_flow_runs__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Flow Runs" + ], + "summary": "Read Flow Run", + "description": "Get a flow run by id.", + "operationId": "read_flow_run_flow_runs__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Flow Runs" + ], + "summary": "Delete Flow Run", + "description": "Delete a flow run by id.", + "operationId": "delete_flow_run_flow_runs__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + 
"required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/count": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Count Flow Runs", + "description": "Query for flow runs.", + "operationId": "count_flow_runs_flow_runs_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_flow_runs_flow_runs_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Flow Runs Flow Runs Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/lateness": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Average Flow Run Lateness", + "description": "Query for average flow-run lateness in seconds.", + "operationId": "average_flow_run_lateness_flow_runs_lateness_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_average_flow_run_lateness_flow_runs_lateness_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Response Average Flow Run Lateness Flow Runs Lateness Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/history": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Flow Run History", + "description": "Query for flow run history data across a given range and interval.", + "operationId": "flow_run_history_flow_runs_history_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_flow_run_history_flow_runs_history_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/HistoryResponse" + }, + "title": "Response Flow Run History Flow Runs History Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + 
"/api/flow_runs/{id}/graph": { + "get": { + "tags": [ + "Flow Runs" + ], + "summary": "Read Flow Run Graph V1", + "description": "Get a task run dependency map for a given flow run.", + "operationId": "read_flow_run_graph_v1_flow_runs__id__graph_get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DependencyResult" + }, + "title": "Response Read Flow Run Graph V1 Flow Runs Id Graph Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/graph-v2": { + "get": { + "tags": [ + "Flow Runs" + ], + "summary": "Read Flow Run Graph V2", + "description": "Get a graph of the tasks and subflow runs for the given flow run", + "operationId": "read_flow_run_graph_v2_flow_runs__id__graph_v2_get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "since", + "in": "query", + "required": false, + "schema": { + "type": "string", + "format": "date-time", + "description": "Only include runs that start or end after this time.", + "default": "0001-01-01T00:00:00", + "title": "Since" + }, + "description": "Only include runs that start or end after this time." 
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Graph" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/resume": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Resume Flow Run", + "description": "Resume a paused flow run.", + "operationId": "resume_flow_run_flow_runs__id__resume_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_resume_flow_run_flow_runs__id__resume_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OrchestrationResult" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/filter": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Read Flow Runs", + "description": "Query for flow runs.", + "operationId": "read_flow_runs_flow_runs_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_flow_runs_flow_runs_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FlowRunResponse" + }, + "title": "Response Read Flow Runs Flow Runs Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/set_state": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Set Flow Run State", + "description": "Set a flow run state, invoking any orchestration rules.", + "operationId": "set_flow_run_state_flow_runs__id__set_state_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": 
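Resuming a paused run is a POST whose request body is optional (the requestBody above carries no required flag) and whose reply is an OrchestrationResult. A sketch; the result's keys are assumptions:

import httpx

API = "http://127.0.0.1:4200/api"  # assumed local server
run = "00000000-0000-0000-0000-000000000000"  # placeholder flow run id

result = httpx.post(f"{API}/flow_runs/{run}/resume").json()
print(result.get("status"), result.get("state"))  # assumed OrchestrationResult keys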
"#/components/schemas/Body_set_flow_run_state_flow_runs__id__set_state_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OrchestrationResult" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/input": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Create Flow Run Input", + "description": "Create a key/value input for a flow run.", + "operationId": "create_flow_run_input_flow_runs__id__input_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_create_flow_run_input_flow_runs__id__input_post" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/input/filter": { + "post": { + "tags": [ + "Flow Runs" + ], + "summary": "Filter Flow Run Input", + "description": "Filter flow run inputs by key prefix", + "operationId": "filter_flow_run_input_flow_runs__id__input_filter_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_filter_flow_run_input_flow_runs__id__input_filter_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FlowRunInput" + }, + "title": "Response Filter Flow Run Input Flow Runs Id Input Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_runs/{id}/input/{key}": { + "get": { + "tags": [ + "Flow Runs" + ], + "summary": "Read Flow Run Input", + "description": "Create a value from a flow run input", + "operationId": "read_flow_run_input_flow_runs__id__input__key__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The input key", + "title": "Key" + }, + 
"description": "The input key" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Flow Runs" + ], + "summary": "Delete Flow Run Input", + "description": "Delete a flow run input", + "operationId": "delete_flow_run_input_flow_runs__id__input__key__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run id", + "title": "Id" + }, + "description": "The flow run id" + }, + { + "name": "key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The input key", + "title": "Key" + }, + "description": "The input key" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/": { + "post": { + "tags": [ + "Task Runs" + ], + "summary": "Create Task Run", + "description": "Create a task run. If a task run with the same flow_run_id,\ntask_key, and dynamic_key already exists, the existing task\nrun will be returned.\n\nIf no state is provided, the task run will be created in a PENDING state.", + "operationId": "create_task_run_task_runs__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TaskRunCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TaskRun" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/{id}": { + "patch": { + "tags": [ + "Task Runs" + ], + "summary": "Update Task Run", + "description": "Updates a task run.", + "operationId": "update_task_run_task_runs__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The task run id", + "title": "Id" + }, + "description": "The task run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TaskRunUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Task Runs" + ], + "summary": "Read Task Run", + "description": "Get a task run by id.", + "operationId": "read_task_run_task_runs__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The task run id", + "title": "Id" + }, + "description": "The task run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TaskRun" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Task Runs" + ], + "summary": "Delete Task Run", + "description": "Delete a task run by id.", + "operationId": "delete_task_run_task_runs__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The task run id", + "title": "Id" + }, + "description": "The task run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/count": { + "post": { + "tags": [ + "Task Runs" + ], + "summary": "Count Task Runs", + "description": "Count task runs.", + "operationId": "count_task_runs_task_runs_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_task_runs_task_runs_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Task Runs Task Runs Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/history": { + "post": { + "tags": [ + "Task Runs" + ], + "summary": "Task Run History", + "description": "Query for task run history data across a given range and interval.", + "operationId": "task_run_history_task_runs_history_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_task_run_history_task_runs_history_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + 
"$ref": "#/components/schemas/HistoryResponse" + }, + "title": "Response Task Run History Task Runs History Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/filter": { + "post": { + "tags": [ + "Task Runs" + ], + "summary": "Read Task Runs", + "description": "Query for task runs.", + "operationId": "read_task_runs_task_runs_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_task_runs_task_runs_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TaskRun" + }, + "title": "Response Read Task Runs Task Runs Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_runs/{id}/set_state": { + "post": { + "tags": [ + "Task Runs" + ], + "summary": "Set Task Run State", + "description": "Set a task run state, invoking any orchestration rules.", + "operationId": "set_task_run_state_task_runs__id__set_state_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The task run id", + "title": "Id" + }, + "description": "The task run id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_set_task_run_state_task_runs__id__set_state_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OrchestrationResult" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_run_states/{id}": { + "get": { + "tags": [ + "Flow Run States" + ], + "summary": "Read Flow Run State", + "description": "Get a flow run state by id.", + "operationId": "read_flow_run_state_flow_run_states__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run state id", + "title": "Id" + }, + "description": "The flow run state id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/State" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + 
} + }, + "/api/flow_run_states/": { + "get": { + "tags": [ + "Flow Run States" + ], + "summary": "Read Flow Run States", + "description": "Get states associated with a flow run.", + "operationId": "read_flow_run_states_flow_run_states__get", + "parameters": [ + { + "name": "flow_run_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Flow Run Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/State" + }, + "title": "Response Read Flow Run States Flow Run States Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_run_states/{id}": { + "get": { + "tags": [ + "Task Run States" + ], + "summary": "Read Task Run State", + "description": "Get a task run state by id.", + "operationId": "read_task_run_state_task_run_states__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The task run state id", + "title": "Id" + }, + "description": "The task run state id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/State" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/task_run_states/": { + "get": { + "tags": [ + "Task Run States" + ], + "summary": "Read Task Run States", + "description": "Get states associated with a task run.", + "operationId": "read_task_run_states_task_run_states__get", + "parameters": [ + { + "name": "task_run_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Task Run Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/State" + }, + "title": "Response Read Task Run States Task Run States Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_run_notification_policies/": { + "post": { + "tags": [ + "Flow Run Notification Policies" + ], + "summary": "Create Flow Run Notification Policy", + "description": "Creates a new flow run notification policy.", + "operationId": "create_flow_run_notification_policy_flow_run_notification_policies__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + 
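Unlike the filter routes, the state-history reads are plain GETs keyed by a required flow_run_id (or task_run_id) query parameter. A sketch; the State field names are assumptions:

import httpx

API = "http://127.0.0.1:4200/api"  # assumed local server

states = httpx.get(
    f"{API}/flow_run_states/",
    params={"flow_run_id": "00000000-0000-0000-0000-000000000000"},  # placeholder
).json()
for state in states:
    print(state["timestamp"], state["type"])  # assumed State fields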
"requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunNotificationPolicyCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunNotificationPolicy" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_run_notification_policies/{id}": { + "patch": { + "tags": [ + "Flow Run Notification Policies" + ], + "summary": "Update Flow Run Notification Policy", + "description": "Updates an existing flow run notification policy.", + "operationId": "update_flow_run_notification_policy_flow_run_notification_policies__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run notification policy id", + "title": "Id" + }, + "description": "The flow run notification policy id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunNotificationPolicyUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Flow Run Notification Policies" + ], + "summary": "Read Flow Run Notification Policy", + "description": "Get a flow run notification policy by id.", + "operationId": "read_flow_run_notification_policy_flow_run_notification_policies__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run notification policy id", + "title": "Id" + }, + "description": "The flow run notification policy id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunNotificationPolicy" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Flow Run Notification Policies" + ], + "summary": "Delete Flow Run Notification Policy", + "description": "Delete a flow run notification policy by id.", + "operationId": "delete_flow_run_notification_policy_flow_run_notification_policies__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The flow run notification policy id", + "title": "Id" + }, + "description": "The flow run notification policy id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + 
"description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/flow_run_notification_policies/filter": { + "post": { + "tags": [ + "Flow Run Notification Policies" + ], + "summary": "Read Flow Run Notification Policies", + "description": "Query for flow run notification policies.", + "operationId": "read_flow_run_notification_policies_flow_run_notification_policies_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FlowRunNotificationPolicy" + }, + "title": "Response Read Flow Run Notification Policies Flow Run Notification Policies Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Create Deployment", + "description": "Gracefully creates a new deployment from the provided schema. If a deployment with\nthe same name and flow_id already exists, the deployment is updated.\n\nIf the deployment has an active schedule, flow runs will be scheduled.\nWhen upserting, any scheduled runs from the existing deployment will be deleted.", + "operationId": "create_deployment_deployments__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}": { + "patch": { + "tags": [ + "Deployments" + ], + "summary": "Update Deployment", + "operationId": "update_deployment_deployments__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Deployments" + ], + "summary": "Read Deployment", + "description": "Get a deployment by id.", + "operationId": "read_deployment_deployments__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Deployments" + ], + "summary": "Delete Deployment", + "description": "Delete a deployment by id.", + "operationId": "delete_deployment_deployments__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/name/{flow_name}/{deployment_name}": { + "get": { + "tags": [ + "Deployments" + ], + "summary": "Read Deployment By Name", + "description": "Get a deployment using the name of the flow and the deployment.", + "operationId": "read_deployment_by_name_deployments_name__flow_name___deployment_name__get", + "parameters": [ + { + "name": "flow_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The name of the flow", + "title": "Flow Name" + }, + "description": "The name of the flow" + }, + { + "name": "deployment_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The name of the deployment", + "title": "Deployment Name" + }, + "description": "The name of the deployment" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/filter": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Read Deployments", + "description": "Query for deployments.", + "operationId": "read_deployments_deployments_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + 
"title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_deployments_deployments_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeploymentResponse" + }, + "title": "Response Read Deployments Deployments Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/get_scheduled_flow_runs": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Get Scheduled Flow Runs For Deployments", + "description": "Get scheduled runs for a set of deployments. Used by a runner to poll for work.", + "operationId": "get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FlowRunResponse" + }, + "title": "Response Get Scheduled Flow Runs For Deployments Deployments Get Scheduled Flow Runs Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/count": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Count Deployments", + "description": "Count deployments.", + "operationId": "count_deployments_deployments_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_deployments_deployments_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Deployments Deployments Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/schedule": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Schedule Deployment", + "description": "Schedule runs for a deployment. For backfills, provide start/end times in the past.\n\nThis function will generate the minimum number of runs that satisfy the min\nand max times, and the min and max counts. 
Specifically, the following order\nwill be respected.\n\n - Runs will be generated starting on or after the `start_time`\n - No more than `max_runs` runs will be generated\n - No runs will be generated after `end_time` is reached\n - At least `min_runs` runs will be generated\n - Runs will be generated until at least `start_time + min_time` is reached", + "operationId": "schedule_deployment_deployments__id__schedule_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_schedule_deployment_deployments__id__schedule_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/resume_deployment": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Resume Deployment", + "description": "Set a deployment schedule to active. Runs will be scheduled immediately.", + "operationId": "resume_deployment_deployments__id__resume_deployment_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/set_schedule_active": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Resume Deployment", + "description": "Set a deployment schedule to active. Runs will be scheduled immediately.", + "operationId": "resume_deployment_deployments__id__set_schedule_active_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/pause_deployment": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Pause Deployment", + "description": "Set a deployment schedule to inactive. 
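Under the ordering rules above, a backfill is simply a schedule request whose window lies in the past. A sketch, assuming start_time, end_time, and max_runs are fields of the schedule body, which the excerpt only references by $ref:

import httpx
from datetime import datetime, timedelta, timezone

API = "http://127.0.0.1:4200/api"  # assumed local server
dep = "00000000-0000-0000-0000-000000000000"  # placeholder deployment id

now = datetime.now(timezone.utc)
payload = {
    "start_time": (now - timedelta(days=7)).isoformat(),  # past window => backfill
    "end_time": now.isoformat(),
    "max_runs": 10,                                       # assumed field names
}
resp = httpx.post(f"{API}/deployments/{dep}/schedule", json=payload)
print(resp.status_code)  # 200 with an untyped body, per the schema above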
Any auto-scheduled runs still in a Scheduled\nstate will be deleted.", + "operationId": "pause_deployment_deployments__id__pause_deployment_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/set_schedule_inactive": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Pause Deployment", + "description": "Set a deployment schedule to inactive. Any auto-scheduled runs still in a Scheduled\nstate will be deleted.", + "operationId": "pause_deployment_deployments__id__set_schedule_inactive_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/create_flow_run": { + "post": { + "tags": [ + "Deployments" + ], + "summary": "Create Flow Run From Deployment", + "description": "Create a flow run from a deployment.\n\nAny parameters not provided will be inferred from the deployment's parameters.\nIf tags are not provided, the deployment's tags will be used.\n\nIf no state is provided, the flow run will be created in a SCHEDULED state.", + "operationId": "create_flow_run_from_deployment_deployments__id__create_flow_run_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentFlowRunCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/FlowRunResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/work_queue_check": { + "get": { + "tags": [ + "Deployments" + ], + "summary": "Work Queue Check For Deployment", + "description": "Get list of work-queues that are able to pick up the specified 
deployment.\n\nThis endpoint is intended to be used by the UI to provide users warnings\nabout deployments that are unable to be executed because there are no work\nqueues that will pick up their runs, based on existing filter criteria. It\nmay be deprecated in the future because there is not a strict relationship\nbetween work queues and deployments.", + "operationId": "work_queue_check_for_deployment_deployments__id__work_queue_check_get", + "deprecated": true, + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkQueue" + }, + "title": "Response Work Queue Check For Deployment Deployments Id Work Queue Check Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/schedules": { + "get": { + "tags": [ + "Deployments" + ], + "summary": "Read Deployment Schedules", + "operationId": "read_deployment_schedules_deployments__id__schedules_get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeploymentSchedule" + }, + "title": "Response Read Deployment Schedules Deployments Id Schedules Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "post": { + "tags": [ + "Deployments" + ], + "summary": "Create Deployment Schedules", + "operationId": "create_deployment_schedules_deployments__id__schedules_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeploymentScheduleCreate" + }, + "description": "The schedules to create", + "title": "Schedules" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/DeploymentSchedule" + }, + "title": "Response Create Deployment Schedules Deployments Id Schedules Post" + } + } + } + 
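The create_flow_run route above fills in omitted parameters and tags from the deployment and defaults the state to SCHEDULED. A sketch, assuming DeploymentFlowRunCreate accepts parameters and tags keys:

import httpx

API = "http://127.0.0.1:4200/api"  # assumed local server
dep = "00000000-0000-0000-0000-000000000000"  # placeholder deployment id

run = httpx.post(
    f"{API}/deployments/{dep}/create_flow_run",
    json={"parameters": {"n": 3}, "tags": ["adhoc"]},  # assumed field names
).json()
print(run["id"], run["state"]["type"])  # SCHEDULED expected; response shape assumed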
}, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/deployments/{id}/schedules/{schedule_id}": { + "patch": { + "tags": [ + "Deployments" + ], + "summary": "Update Deployment Schedule", + "operationId": "update_deployment_schedule_deployments__id__schedules__schedule_id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "schedule_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The schedule id", + "title": "Schedule Id" + }, + "description": "The schedule id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentScheduleUpdate" + } + ], + "description": "The updated schedule", + "title": "Schedule" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Deployments" + ], + "summary": "Delete Deployment Schedule", + "operationId": "delete_deployment_schedule_deployments__id__schedules__schedule_id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The deployment id", + "title": "Id" + }, + "description": "The deployment id" + }, + { + "name": "schedule_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The schedule id", + "title": "Schedule Id" + }, + "description": "The schedule id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/saved_searches/": { + "put": { + "tags": [ + "SavedSearches" + ], + "summary": "Create Saved Search", + "description": "Gracefully creates a new saved search from the provided schema.\n\nIf a saved search with the same name already exists, the saved search's fields are\nreplaced.", + "operationId": "create_saved_search_saved_searches__put", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SavedSearchCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SavedSearch" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": 
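The schedule routes above take a JSON array on create and operate per-schedule on update and delete. A sketch; the active and schedule keys and the interval shape are assumptions about the schemas referenced above:

import httpx

API = "http://127.0.0.1:4200/api"  # assumed local server
dep = "00000000-0000-0000-0000-000000000000"  # placeholder deployment id

# Note the create route takes a JSON array of DeploymentScheduleCreate.
created = httpx.post(
    f"{API}/deployments/{dep}/schedules",
    json=[{"active": True, "schedule": {"interval": 3600}}],  # assumed shape
).json()
schedule_id = created[0]["id"]

# PATCH updates one schedule; DELETE removes it (both 204 on success).
httpx.patch(f"{API}/deployments/{dep}/schedules/{schedule_id}",
            json={"active": False})
httpx.delete(f"{API}/deployments/{dep}/schedules/{schedule_id}")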
{ + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/saved_searches/{id}": { + "get": { + "tags": [ + "SavedSearches" + ], + "summary": "Read Saved Search", + "description": "Get a saved search by id.", + "operationId": "read_saved_search_saved_searches__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The saved search id", + "title": "Id" + }, + "description": "The saved search id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SavedSearch" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "SavedSearches" + ], + "summary": "Delete Saved Search", + "description": "Delete a saved search by id.", + "operationId": "delete_saved_search_saved_searches__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The saved search id", + "title": "Id" + }, + "description": "The saved search id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/saved_searches/filter": { + "post": { + "tags": [ + "SavedSearches" + ], + "summary": "Read Saved Searches", + "description": "Query for saved searches.", + "operationId": "read_saved_searches_saved_searches_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_saved_searches_saved_searches_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SavedSearch" + }, + "title": "Response Read Saved Searches Saved Searches Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/logs/": { + "post": { + "tags": [ + "Logs" + ], + "summary": "Create Logs", + "description": "Create new logs from the provided schema.", + "operationId": "create_logs_logs__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/LogCreate" 
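Log creation is also batch-oriented: the request body is a JSON array of LogCreate objects and success is a 201. A sketch with assumed field names, since the excerpt shows only the $ref:

import httpx
from datetime import datetime, timezone

API = "http://127.0.0.1:4200/api"  # assumed local server

logs = [{
    "name": "my.logger",                                 # assumed LogCreate fields
    "level": 20,                                         # stdlib-style INFO level
    "message": "hello from the REST API",
    "timestamp": datetime.now(timezone.utc).isoformat(),
}]
resp = httpx.post(f"{API}/logs/", json=logs)
assert resp.status_code == 201  # matches the documented 201 response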
+ }, + "title": "Logs" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/logs/filter": { + "post": { + "tags": [ + "Logs" + ], + "summary": "Read Logs", + "description": "Query for logs.", + "operationId": "read_logs_logs_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_logs_logs_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Log" + }, + "title": "Response Read Logs Logs Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/concurrency_limits/": { + "post": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Create Concurrency Limit", + "operationId": "create_concurrency_limit_concurrency_limits__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimitCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimit" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/concurrency_limits/{id}": { + "get": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Read Concurrency Limit", + "description": "Get a concurrency limit by id.\n\nThe `active slots` field contains a list of TaskRun IDs currently using a\nconcurrency slot for the specified tag.", + "operationId": "read_concurrency_limit_concurrency_limits__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The concurrency limit id", + "title": "Id" + }, + "description": "The concurrency limit id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimit" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Delete Concurrency Limit", + "operationId": 
"delete_concurrency_limit_concurrency_limits__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The concurrency limit id", + "title": "Id" + }, + "description": "The concurrency limit id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/concurrency_limits/tag/{tag}": { + "get": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Read Concurrency Limit By Tag", + "description": "Get a concurrency limit by tag.\n\nThe `active slots` field contains a list of TaskRun IDs currently using a\nconcurrency slot for the specified tag.", + "operationId": "read_concurrency_limit_by_tag_concurrency_limits_tag__tag__get", + "parameters": [ + { + "name": "tag", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The tag name", + "title": "Tag" + }, + "description": "The tag name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimit" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Delete Concurrency Limit By Tag", + "operationId": "delete_concurrency_limit_by_tag_concurrency_limits_tag__tag__delete", + "parameters": [ + { + "name": "tag", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The tag name", + "title": "Tag" + }, + "description": "The tag name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/concurrency_limits/filter": { + "post": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Read Concurrency Limits", + "description": "Query for concurrency limits.\n\nFor each concurrency limit the `active slots` field contains a list of TaskRun IDs\ncurrently using a concurrency slot for the specified tag.", + "operationId": "read_concurrency_limits_concurrency_limits_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_concurrency_limits_concurrency_limits_filter_post" + } + ], + "title": "Body" 
+ } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ConcurrencyLimit" + }, + "title": "Response Read Concurrency Limits Concurrency Limits Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/concurrency_limits/tag/{tag}/reset": { + "post": { + "tags": [ + "Concurrency Limits" + ], + "summary": "Reset Concurrency Limit By Tag", + "operationId": "reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post", + "parameters": [ + { + "name": "tag", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The tag name", + "title": "Tag" + }, + "description": "The tag name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v2/concurrency_limits/": { + "post": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Create Concurrency Limit V2", + "operationId": "create_concurrency_limit_v2_v2_concurrency_limits__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimitV2Create" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimitV2" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v2/concurrency_limits/{id_or_name}": { + "get": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Read Concurrency Limit V2", + "operationId": "read_concurrency_limit_v2_v2_concurrency_limits__id_or_name__get", + "parameters": [ + { + "name": "id_or_name", + "in": "path", + "required": true, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "string" + } + ], + "description": "The ID or name of the concurrency limit", + "title": "Id Or Name" + }, + "description": "The ID or name of the concurrency limit" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GlobalConcurrencyLimitResponse" + } + } + } + }, + "422": { + "description": 
"Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Update Concurrency Limit V2", + "operationId": "update_concurrency_limit_v2_v2_concurrency_limits__id_or_name__patch", + "parameters": [ + { + "name": "id_or_name", + "in": "path", + "required": true, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "string" + } + ], + "description": "The ID or name of the concurrency limit", + "title": "Id Or Name" + }, + "description": "The ID or name of the concurrency limit" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ConcurrencyLimitV2Update" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Delete Concurrency Limit V2", + "operationId": "delete_concurrency_limit_v2_v2_concurrency_limits__id_or_name__delete", + "parameters": [ + { + "name": "id_or_name", + "in": "path", + "required": true, + "schema": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "string" + } + ], + "description": "The ID or name of the concurrency limit", + "title": "Id Or Name" + }, + "description": "The ID or name of the concurrency limit" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v2/concurrency_limits/filter": { + "post": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Read All Concurrency Limits V2", + "operationId": "read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/GlobalConcurrencyLimitResponse" + }, + "title": "Response Read All Concurrency Limits V2 V2 Concurrency Limits Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v2/concurrency_limits/increment": { + "post": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Bulk Increment Active Slots", + "operationId": 
"bulk_increment_active_slots_v2_concurrency_limits_increment_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MinimalConcurrencyLimitResponse" + }, + "title": "Response Bulk Increment Active Slots V2 Concurrency Limits Increment Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/v2/concurrency_limits/decrement": { + "post": { + "tags": [ + "Concurrency Limits V2" + ], + "summary": "Bulk Decrement Active Slots", + "operationId": "bulk_decrement_active_slots_v2_concurrency_limits_decrement_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MinimalConcurrencyLimitResponse" + }, + "title": "Response Bulk Decrement Active Slots V2 Concurrency Limits Decrement Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/": { + "post": { + "tags": [ + "Block types" + ], + "summary": "Create Block Type", + "description": "Create a new block type", + "operationId": "create_block_type_block_types__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockTypeCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockType" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/{id}": { + "get": { + "tags": [ + "Block types" + ], + "summary": "Read Block Type By Id", + "description": "Get a block type by ID.", + "operationId": "read_block_type_by_id_block_types__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block type ID", + "title": "Id" + }, + "description": "The block type ID" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": 
"string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockType" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Block types" + ], + "summary": "Update Block Type", + "description": "Update a block type.", + "operationId": "update_block_type_block_types__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block type ID", + "title": "Id" + }, + "description": "The block type ID" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockTypeUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Block types" + ], + "summary": "Delete Block Type", + "operationId": "delete_block_type_block_types__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block type ID", + "title": "Id" + }, + "description": "The block type ID" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/slug/{slug}": { + "get": { + "tags": [ + "Block types" + ], + "summary": "Read Block Type By Slug", + "description": "Get a block type by name.", + "operationId": "read_block_type_by_slug_block_types_slug__slug__get", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The block type name", + "title": "Slug" + }, + "description": "The block type name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockType" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/filter": { + "post": { + "tags": [ + "Block types" + ], + "summary": "Read Block Types", + "description": "Gets all block types. 
Optionally limit the results with limit and offset.", + "operationId": "read_block_types_block_types_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_block_types_block_types_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockType" + }, + "title": "Response Read Block Types Block Types Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/slug/{slug}/block_documents": { + "get": { + "tags": [ + "Block types", + "Block documents" + ], + "summary": "Read Block Documents For Block Type", + "operationId": "read_block_documents_for_block_type_block_types_slug__slug__block_documents_get", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The block type slug", + "title": "Slug" + }, + "description": "The block type slug" + }, + { + "name": "include_secrets", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "Whether to include sensitive values in the block document.", + "default": false, + "title": "Include Secrets" + }, + "description": "Whether to include sensitive values in the block document." 
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockDocument" + }, + "title": "Response Read Block Documents For Block Type Block Types Slug Slug Block Documents Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/slug/{slug}/block_documents/name/{block_document_name}": { + "get": { + "tags": [ + "Block types", + "Block documents" + ], + "summary": "Read Block Document By Name For Block Type", + "operationId": "read_block_document_by_name_for_block_type_block_types_slug__slug__block_documents_name__block_document_name__get", + "parameters": [ + { + "name": "slug", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The block type slug", + "title": "Slug" + }, + "description": "The block type slug" + }, + { + "name": "block_document_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The block document name", + "title": "Block Document Name" + }, + "description": "The block document name" + }, + { + "name": "include_secrets", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "Whether to include sensitive values in the block document.", + "default": false, + "title": "Include Secrets" + }, + "description": "Whether to include sensitive values in the block document." 
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockDocument" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_types/install_system_block_types": { + "post": { + "tags": [ + "Block types" + ], + "summary": "Install System Block Types", + "operationId": "install_system_block_types_block_types_install_system_block_types_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_documents/": { + "post": { + "tags": [ + "Block documents" + ], + "summary": "Create Block Document", + "description": "Create a new block document.", + "operationId": "create_block_document_block_documents__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockDocumentCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockDocument" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_documents/filter": { + "post": { + "tags": [ + "Block documents" + ], + "summary": "Read Block Documents", + "description": "Query for block documents.", + "operationId": "read_block_documents_block_documents_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_block_documents_block_documents_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockDocument" + }, + "title": "Response Read Block Documents Block Documents Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_documents/count": { + "post": { + "tags": [ + "Block documents" + ], + "summary": "Count Block Documents", + "description": "Count block documents.", + "operationId": "count_block_documents_block_documents_count_post", + 
"parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_block_documents_block_documents_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Block Documents Block Documents Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_documents/{id}": { + "get": { + "tags": [ + "Block documents" + ], + "summary": "Read Block Document By Id", + "operationId": "read_block_document_by_id_block_documents__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block document id", + "title": "Id" + }, + "description": "The block document id" + }, + { + "name": "include_secrets", + "in": "query", + "required": false, + "schema": { + "type": "boolean", + "description": "Whether to include sensitive values in the block document.", + "default": false, + "title": "Include Secrets" + }, + "description": "Whether to include sensitive values in the block document." + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockDocument" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Block documents" + ], + "summary": "Delete Block Document", + "operationId": "delete_block_document_block_documents__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block document id", + "title": "Id" + }, + "description": "The block document id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Block documents" + ], + "summary": "Update Block Document Data", + "operationId": "update_block_document_data_block_documents__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block document id", + "title": "Id" + }, + "description": "The block document id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + 
"$ref": "#/components/schemas/BlockDocumentUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Create Work Pool", + "description": "Creates a new work pool. If a work pool with the same\nname already exists, an error will be raised.", + "operationId": "create_work_pool_work_pools__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkPoolCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkPool" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{name}": { + "get": { + "tags": [ + "Work Pools" + ], + "summary": "Read Work Pool", + "description": "Read a work pool by name", + "operationId": "read_work_pool_work_pools__name__get", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkPool" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Work Pools" + ], + "summary": "Update Work Pool", + "description": "Update a work pool", + "operationId": "update_work_pool_work_pools__name__patch", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkPoolUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Work Pools" + ], + "summary": "Delete Work Pool", + "description": "Delete a work pool", + "operationId": "delete_work_pool_work_pools__name__delete", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Name" + }, + 
"description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/filter": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Read Work Pools", + "description": "Read multiple work pools", + "operationId": "read_work_pools_work_pools_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_work_pools_work_pools_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkPool" + }, + "title": "Response Read Work Pools Work Pools Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/count": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Count Work Pools", + "description": "Count work pools", + "operationId": "count_work_pools_work_pools_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_work_pools_work_pools_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Work Pools Work Pools Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{name}/get_scheduled_flow_runs": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Get Scheduled Flow Runs", + "description": "Load scheduled runs for a worker", + "operationId": "get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + 
"items": { + "$ref": "#/components/schemas/WorkerFlowRunResponse" + }, + "title": "Response Get Scheduled Flow Runs Work Pools Name Get Scheduled Flow Runs Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/queues": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Create Work Queue", + "description": "Creates a new work pool queue. If a work pool queue with the same\nname already exists, an error will be raised.", + "operationId": "create_work_queue_work_pools__work_pool_name__queues_post", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/queues/{name}": { + "get": { + "tags": [ + "Work Pools" + ], + "summary": "Read Work Queue", + "description": "Read a work pool queue", + "operationId": "read_work_queue_work_pools__work_pool_name__queues__name__get", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool queue name", + "title": "Name" + }, + "description": "The work pool queue name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Work Pools" + ], + "summary": "Update Work Queue", + "description": "Update a work pool queue", + "operationId": "update_work_queue_work_pools__work_pool_name__queues__name__patch", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool queue name", + "title": "Name" + }, + "description": "The work pool queue name" + }, + { + "name": "x-prefect-api-version", + "in": 
"header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Work Pools" + ], + "summary": "Delete Work Queue", + "description": "Delete a work pool queue", + "operationId": "delete_work_queue_work_pools__work_pool_name__queues__name__delete", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool queue name", + "title": "Name" + }, + "description": "The work pool queue name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/queues/filter": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Read Work Queues", + "description": "Read all work pool queues", + "operationId": "read_work_queues_work_pools__work_pool_name__queues_filter_post", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_work_queues_work_pools__work_pool_name__queues_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkQueueResponse" + }, + "title": "Response Read Work Queues Work Pools Work Pool Name Queues Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/workers/heartbeat": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Worker Heartbeat", + "operationId": "worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + 
"title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/workers/filter": { + "post": { + "tags": [ + "Work Pools" + ], + "summary": "Read Workers", + "description": "Read all worker processes", + "operationId": "read_workers_work_pools__work_pool_name__workers_filter_post", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_workers_work_pools__work_pool_name__workers_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkerResponse" + }, + "title": "Response Read Workers Work Pools Work Pool Name Workers Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_pools/{work_pool_name}/workers/{name}": { + "delete": { + "tags": [ + "Work Pools" + ], + "summary": "Delete Worker", + "description": "Delete a work pool's worker", + "operationId": "delete_worker_work_pools__work_pool_name__workers__name__delete", + "parameters": [ + { + "name": "work_pool_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool name", + "title": "Work Pool Name" + }, + "description": "The work pool name" + }, + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The work pool's worker name", + "title": "Name" + }, + "description": "The work pool's worker name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/": { + "post": { + "tags": [ + "Work Queues" + ], + "summary": "Create Work Queue", + "description": "Creates a new work queue.\n\nIf a work queue with the same name already exists, an error\nwill be raised.", + "operationId": "create_work_queue_work_queues__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { 
+ "schema": { + "$ref": "#/components/schemas/WorkQueueCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/{id}": { + "patch": { + "tags": [ + "Work Queues" + ], + "summary": "Update Work Queue", + "description": "Updates an existing work queue.", + "operationId": "update_work_queue_work_queues__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The work queue id", + "title": "Id" + }, + "description": "The work queue id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Work Queues" + ], + "summary": "Read Work Queue", + "description": "Get a work queue by id.", + "operationId": "read_work_queue_work_queues__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The work queue id", + "title": "Id" + }, + "description": "The work queue id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Work Queues" + ], + "summary": "Delete Work Queue", + "description": "Delete a work queue by id.", + "operationId": "delete_work_queue_work_queues__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The work queue id", + "title": "Id" + }, + "description": "The work queue id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/name/{name}": { + "get": { + "tags": [ + "Work Queues" + ], + "summary": "Read Work Queue By Name", + "description": "Get a work queue by id.", + "operationId": "read_work_queue_by_name_work_queues_name__name__get", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + 
"schema": { + "type": "string", + "description": "The work queue name", + "title": "Name" + }, + "description": "The work queue name" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/{id}/get_runs": { + "post": { + "tags": [ + "Work Queues" + ], + "summary": "Read Work Queue Runs", + "description": "Get flow runs from the work queue.", + "operationId": "read_work_queue_runs_work_queues__id__get_runs_post", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The work queue id", + "title": "Id" + }, + "description": "The work queue id" + }, + { + "name": "x-prefect-ui", + "in": "header", + "required": false, + "schema": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "description": "A header to indicate this request came from the Prefect UI.", + "default": false, + "title": "X-Prefect-Ui" + }, + "description": "A header to indicate this request came from the Prefect UI." + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_work_queue_runs_work_queues__id__get_runs_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FlowRunResponse" + }, + "title": "Response Read Work Queue Runs Work Queues Id Get Runs Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/filter": { + "post": { + "tags": [ + "Work Queues" + ], + "summary": "Read Work Queues", + "description": "Query for work queues.", + "operationId": "read_work_queues_work_queues_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_work_queues_work_queues_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WorkQueueResponse" + }, + "title": "Response Read Work Queues Work Queues Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/work_queues/{id}/status": { + "get": { + "tags": [ + "Work Queues" + ], + "summary": "Read 
Work Queue Status", + "description": "Get the status of a work queue.", + "operationId": "read_work_queue_status_work_queues__id__status_get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The work queue id", + "title": "Id" + }, + "description": "The work queue id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WorkQueueStatusDetail" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/": { + "post": { + "tags": [ + "Artifacts" + ], + "summary": "Create Artifact", + "operationId": "create_artifact_artifacts__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactCreate" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Artifact" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/{id}": { + "get": { + "tags": [ + "Artifacts" + ], + "summary": "Read Artifact", + "description": "Retrieve an artifact from the database.", + "operationId": "read_artifact_artifacts__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The ID of the artifact to retrieve.", + "title": "Id" + }, + "description": "The ID of the artifact to retrieve." + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Artifact" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Artifacts" + ], + "summary": "Update Artifact", + "description": "Update an artifact in the database.", + "operationId": "update_artifact_artifacts__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The ID of the artifact to update.", + "title": "Id" + }, + "description": "The ID of the artifact to update." 
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ArtifactUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Artifacts" + ], + "summary": "Delete Artifact", + "description": "Delete an artifact from the database.", + "operationId": "delete_artifact_artifacts__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The ID of the artifact to delete.", + "title": "Id" + }, + "description": "The ID of the artifact to delete." + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/{key}/latest": { + "get": { + "tags": [ + "Artifacts" + ], + "summary": "Read Latest Artifact", + "description": "Retrieve the latest artifact from the artifact table.", + "operationId": "read_latest_artifact_artifacts__key__latest_get", + "parameters": [ + { + "name": "key", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The key of the artifact to retrieve.", + "title": "Key" + }, + "description": "The key of the artifact to retrieve." 
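The artifact CRUD routes combine naturally with the `{key}/latest` lookup defined here: create keyed artifacts, then resolve the newest one by key. A minimal sketch, where the base URL and every field value are illustrative assumptions (all `ArtifactCreate` fields are optional per the component schema later in this file):

```python
# Sketch only: create a keyed artifact, then fetch the latest version by key.
import httpx

with httpx.Client(base_url="http://127.0.0.1:4200/api") as client:
    created = client.post(
        "/artifacts/",
        json={  # ArtifactCreate; every field here is optional
            "key": "daily-report",                # illustrative key
            "type": "markdown",                   # shape of the data field
            "description": "Nightly summary artifact.",
            "data": "# Report\nAll green.",
        },
    )
    created.raise_for_status()
    # GET /artifacts/{key}/latest -> the most recent Artifact for that key
    latest = client.get("/artifacts/daily-report/latest")
    latest.raise_for_status()
    print(latest.json()["id"])
```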
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Artifact" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/filter": { + "post": { + "tags": [ + "Artifacts" + ], + "summary": "Read Artifacts", + "description": "Retrieve artifacts from the database.", + "operationId": "read_artifacts_artifacts_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_artifacts_artifacts_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Artifact" + }, + "title": "Response Read Artifacts Artifacts Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/latest/filter": { + "post": { + "tags": [ + "Artifacts" + ], + "summary": "Read Latest Artifacts", + "description": "Retrieve artifacts from the database.", + "operationId": "read_latest_artifacts_artifacts_latest_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_latest_artifacts_artifacts_latest_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ArtifactCollection" + }, + "title": "Response Read Latest Artifacts Artifacts Latest Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/count": { + "post": { + "tags": [ + "Artifacts" + ], + "summary": "Count Artifacts", + "description": "Count artifacts from the database.", + "operationId": "count_artifacts_artifacts_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_artifacts_artifacts_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Artifacts Artifacts Count Post" + } + } + } + }, + "422": { + 
"description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/artifacts/latest/count": { + "post": { + "tags": [ + "Artifacts" + ], + "summary": "Count Latest Artifacts", + "description": "Count artifacts from the database.", + "operationId": "count_latest_artifacts_artifacts_latest_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_latest_artifacts_artifacts_latest_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Latest Artifacts Artifacts Latest Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_schemas/": { + "post": { + "tags": [ + "Block schemas" + ], + "summary": "Create Block Schema", + "operationId": "create_block_schema_block_schemas__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockSchemaCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockSchema" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_schemas/{id}": { + "delete": { + "tags": [ + "Block schemas" + ], + "summary": "Delete Block Schema", + "description": "Delete a block schema by id.", + "operationId": "delete_block_schema_block_schemas__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block schema id", + "title": "Id" + }, + "description": "The block schema id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Block schemas" + ], + "summary": "Read Block Schema By Id", + "description": "Get a block schema by id.", + "operationId": "read_block_schema_by_id_block_schemas__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "description": "The block schema id", + "title": "Id" + }, + "description": "The block schema id" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": 
"X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockSchema" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_schemas/filter": { + "post": { + "tags": [ + "Block schemas" + ], + "summary": "Read Block Schemas", + "description": "Read all block schemas, optionally filtered by type", + "operationId": "read_block_schemas_block_schemas_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_block_schemas_block_schemas_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockSchema" + }, + "title": "Response Read Block Schemas Block Schemas Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_schemas/checksum/{checksum}": { + "get": { + "tags": [ + "Block schemas" + ], + "summary": "Read Block Schema By Checksum", + "operationId": "read_block_schema_by_checksum_block_schemas_checksum__checksum__get", + "parameters": [ + { + "name": "checksum", + "in": "path", + "required": true, + "schema": { + "type": "string", + "description": "The block schema checksum", + "title": "Checksum" + }, + "description": "The block schema checksum" + }, + { + "name": "version", + "in": "query", + "required": false, + "schema": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "description": "Version of block schema. If not provided the most recently created block schema with the matching checksum will be returned.", + "title": "Version" + }, + "description": "Version of block schema. If not provided the most recently created block schema with the matching checksum will be returned." 
+ }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BlockSchema" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/block_capabilities/": { + "get": { + "tags": [ + "Block capabilities" + ], + "summary": "Read Available Block Capabilities", + "operationId": "read_available_block_capabilities_block_capabilities__get", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Response Read Available Block Capabilities Block Capabilities Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/collections/views/{view}": { + "get": { + "tags": [ + "Collections" + ], + "summary": "Read View Content", + "description": "Reads the content of a view from the prefect-collection-registry.", + "operationId": "read_view_content_collections_views__view__get", + "parameters": [ + { + "name": "view", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "View" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "object", + "title": "Response Read View Content Collections Views View Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/variables/": { + "post": { + "tags": [ + "Variables" + ], + "summary": "Create Variable", + "operationId": "create_variable_variables__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VariableCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Variable" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/variables/{id}": { + "get": { + "tags": [ + "Variables" + ], + "summary": "Read Variable", + "operationId": "read_variable_variables__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": 
"header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Variable" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Variables" + ], + "summary": "Update Variable", + "operationId": "update_variable_variables__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VariableUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Variables" + ], + "summary": "Delete Variable", + "operationId": "delete_variable_variables__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/variables/name/{name}": { + "get": { + "tags": [ + "Variables" + ], + "summary": "Read Variable By Name", + "operationId": "read_variable_by_name_variables_name__name__get", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Name" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Variable" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Variables" + ], + "summary": "Update Variable By Name", + "operationId": "update_variable_by_name_variables_name__name__patch", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Name" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VariableUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + 
"422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Variables" + ], + "summary": "Delete Variable By Name", + "operationId": "delete_variable_by_name_variables_name__name__delete", + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Name" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/variables/filter": { + "post": { + "tags": [ + "Variables" + ], + "summary": "Read Variables", + "operationId": "read_variables_variables_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_variables_variables_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Variable" + }, + "title": "Response Read Variables Variables Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/variables/count": { + "post": { + "tags": [ + "Variables" + ], + "summary": "Count Variables", + "operationId": "count_variables_variables_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_count_variables_variables_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Variables Variables Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/csrf-token": { + "get": { + "summary": "Create Csrf Token", + "description": "Create or update a CSRF token for a client", + "operationId": "create_csrf_token_csrf_token_get", + "parameters": [ + { + "name": "client", + "in": "query", + "required": true, + "schema": { + "type": "string", + "description": "The client to create a CSRF token for", + "title": "Client" + }, + "description": "The client to create a CSRF token for" + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/CsrfToken" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/events": { + "post": { + "tags": [ + "Events" + ], + "summary": "Create Events", + "description": "Record a batch of Events", + "operationId": "create_events_events_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Event" + }, + "title": "Events" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/events/filter": { + "post": { + "tags": [ + "Events" + ], + "summary": "Read Events", + "description": "Queries for Events matching the given filter criteria in the given Account. Returns\nthe first page of results, and the URL to request the next page (if there are more\nresults).", + "operationId": "read_events_events_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_events_events_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EventPage" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/events/filter/next": { + "get": { + "tags": [ + "Events" + ], + "summary": "Read Account Events Page", + "description": "Returns the next page of Events for a previous query against the given Account, and\nthe URL to request the next page (if there are more results).", + "operationId": "read_account_events_page_events_filter_next_get", + "parameters": [ + { + "name": "page-token", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Page-Token" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/EventPage" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/events/count-by/{countable}": { + "post": { + "tags": [ + "Events" + ], + "summary": "Count Account Events", + "description": "Returns distinct objects and the count of events associated with them. 
Objects\nthat can be counted include the day the event occurred, the type of event, or\nthe IDs of the resources associated with the event.", + "operationId": "count_account_events_events_count_by__countable__post", + "parameters": [ + { + "name": "countable", + "in": "path", + "required": true, + "schema": { + "$ref": "#/components/schemas/Countable" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_count_account_events_events_count_by__countable__post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/EventCount" + }, + "title": "Response Count Account Events Events Count By Countable Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/automations/": { + "post": { + "tags": [ + "Automations" + ], + "summary": "Create Automation", + "operationId": "create_automation_automations__post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AutomationCreate" + } + } + } + }, + "responses": { + "201": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Automation" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/automations/{id}": { + "put": { + "tags": [ + "Automations" + ], + "summary": "Update Automation", + "operationId": "update_automation_automations__id__put", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AutomationUpdate" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "patch": { + "tags": [ + "Automations" + ], + "summary": "Patch Automation", + "operationId": "patch_automation_automations__id__patch", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/AutomationPartialUpdate" + 
} + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "tags": [ + "Automations" + ], + "summary": "Delete Automation", + "operationId": "delete_automation_automations__id__delete", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "tags": [ + "Automations" + ], + "summary": "Read Automation", + "operationId": "read_automation_automations__id__get", + "parameters": [ + { + "name": "id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Automation" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/automations/filter": { + "post": { + "tags": [ + "Automations" + ], + "summary": "Read Automations", + "operationId": "read_automations_automations_filter_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_automations_automations_filter_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Automation" + }, + "title": "Response Read Automations Automations Filter Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/automations/count": { + "post": { + "tags": [ + "Automations" + ], + "summary": "Count Automations", + "operationId": "count_automations_automations_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "integer", + "title": "Response Count Automations Automations Count Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + 
} + }, + "/api/automations/related-to/{resource_id}": { + "get": { + "tags": [ + "Automations" + ], + "summary": "Read Automations Related To Resource", + "operationId": "read_automations_related_to_resource_automations_related_to__resource_id__get", + "parameters": [ + { + "name": "resource_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Resource Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Automation" + }, + "title": "Response Read Automations Related To Resource Automations Related To Resource Id Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/automations/owned-by/{resource_id}": { + "delete": { + "tags": [ + "Automations" + ], + "summary": "Delete Automations Owned By Resource", + "operationId": "delete_automations_owned_by_resource_automations_owned_by__resource_id__delete", + "parameters": [ + { + "name": "resource_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Resource Id" + } + }, + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "202": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/templates/validate": { + "post": { + "tags": [ + "Automations" + ], + "summary": "Validate Template", + "operationId": "validate_template_templates_validate_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "string", + "default": "", + "title": "Template" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/flows/count-deployments": { + "post": { + "tags": [ + "Flows", + "UI" + ], + "summary": "Count Deployments By Flow", + "description": "Get deployment counts by flow id.", + "operationId": "count_deployments_by_flow_ui_flows_count_deployments_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_count_deployments_by_flow_ui_flows_count_deployments_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "object", + "additionalProperties": { + "type": "integer" + }, + "title": "Response Count 
Deployments By Flow Ui Flows Count Deployments Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/flows/next-runs": { + "post": { + "tags": [ + "Flows", + "UI" + ], + "summary": "Next Runs By Flow", + "description": "Get the next flow run by flow id.", + "operationId": "next_runs_by_flow_ui_flows_next_runs_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_next_runs_by_flow_ui_flows_next_runs_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/components/schemas/SimpleNextFlowRun" + }, + { + "type": "null" + } + ] + }, + "title": "Response Next Runs By Flow Ui Flows Next Runs Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/flow_runs/history": { + "post": { + "tags": [ + "Flow Runs", + "UI" + ], + "summary": "Read Flow Run History", + "operationId": "read_flow_run_history_ui_flow_runs_history_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_flow_run_history_ui_flow_runs_history_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SimpleFlowRun" + }, + "title": "Response Read Flow Run History Ui Flow Runs History Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/schemas/validate": { + "post": { + "tags": [ + "UI", + "Schemas" + ], + "summary": "Validate Obj", + "operationId": "validate_obj_ui_schemas_validate_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_validate_obj_ui_schemas_validate_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/task_runs/dashboard/counts": { + "post": { + "tags": [ + "Task Runs", + "UI" + ], + "summary": "Read Dashboard Task Run Counts", + "operationId": "read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post", + "parameters": [ + { + 
"name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/TaskRunCount" + }, + "title": "Response Read Dashboard Task Run Counts Ui Task Runs Dashboard Counts Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ui/task_runs/count": { + "post": { + "tags": [ + "Task Runs", + "UI" + ], + "summary": "Read Task Run Counts By State", + "operationId": "read_task_run_counts_by_state_ui_task_runs_count_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_read_task_run_counts_by_state_ui_task_runs_count_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CountByState" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/admin/settings": { + "get": { + "tags": [ + "Admin" + ], + "summary": "Read Settings", + "description": "Get the current Prefect REST API settings.\n\nSecret setting values will be obfuscated.", + "operationId": "read_settings_admin_settings_get", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Settings" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/admin/version": { + "get": { + "tags": [ + "Admin" + ], + "summary": "Read Version", + "description": "Returns the Prefect version number", + "operationId": "read_version_admin_version_get", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "type": "string", + "title": "Response Read Version Admin Version Get" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/admin/database/clear": { + "post": { + "tags": [ + "Admin" + ], + "summary": "Clear Database", + "description": "Clear all database tables without 
dropping them.", + "operationId": "clear_database_admin_database_clear_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_clear_database_admin_database_clear_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/admin/database/drop": { + "post": { + "tags": [ + "Admin" + ], + "summary": "Drop Database", + "description": "Drop all database objects.", + "operationId": "drop_database_admin_database_drop_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_drop_database_admin_database_drop_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/admin/database/create": { + "post": { + "tags": [ + "Admin" + ], + "summary": "Create Database", + "description": "Create all database objects.", + "operationId": "create_database_admin_database_create_post", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "allOf": [ + { + "$ref": "#/components/schemas/Body_create_database_admin_database_create_post" + } + ], + "title": "Body" + } + } + } + }, + "responses": { + "204": { + "description": "Successful Response" + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/hello": { + "get": { + "tags": [ + "Root" + ], + "summary": "Hello", + "description": "Say hello!", + "operationId": "hello_hello_get", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/api/ready": { + "get": { + "tags": [ + "Root" + ], + "summary": "Perform Readiness Check", + "operationId": "perform_readiness_check_ready_get", + "parameters": [ + { + "name": "x-prefect-api-version", + "in": "header", + "required": false, + "schema": { + "type": "string", + "title": "X-Prefect-Api-Version" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + 
"description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "Artifact": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Key", + "description": "An optional unique reference key for this artifact." + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Type", + "description": "An identifier that describes the shape of the data field. e.g. 'result', 'table', 'markdown'" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A markdown-enabled description of the artifact." + }, + "data": { + "anyOf": [ + { + "type": "object" + }, + {}, + { + "type": "null" + } + ], + "title": "Data", + "description": "Data associated with the artifact, e.g. a result.; structure depends on the artifact type." + }, + "metadata_": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Metadata ", + "description": "User-defined artifact metadata. Content must be string key and value pairs." + }, + "flow_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Flow Run Id", + "description": "The flow run associated with the artifact." + }, + "task_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Task Run Id", + "description": "The task run associated with the artifact." + } + }, + "type": "object", + "title": "Artifact" + }, + "ArtifactCollection": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "key": { + "type": "string", + "title": "Key", + "description": "An optional unique reference key for this artifact." + }, + "latest_id": { + "type": "string", + "format": "uuid", + "title": "Latest Id", + "description": "The latest artifact ID associated with the key." + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Type", + "description": "An identifier that describes the shape of the data field. e.g. 'result', 'table', 'markdown'" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A markdown-enabled description of the artifact." + }, + "data": { + "anyOf": [ + { + "type": "object" + }, + {}, + { + "type": "null" + } + ], + "title": "Data", + "description": "Data associated with the artifact, e.g. a result.; structure depends on the artifact type." 
+ }, + "metadata_": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Metadata ", + "description": "User-defined artifact metadata. Content must be string key and value pairs." + }, + "flow_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Flow Run Id", + "description": "The flow run associated with the artifact." + }, + "task_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Task Run Id", + "description": "The task run associated with the artifact." + } + }, + "type": "object", + "required": [ + "key", + "latest_id" + ], + "title": "ArtifactCollection" + }, + "ArtifactCollectionFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "latest_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilterLatestId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.id`" + }, + "key": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilterKey" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.key`" + }, + "flow_run_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilterFlowRunId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.flow_run_id`" + }, + "task_run_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilterTaskRunId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.task_run_id`" + }, + "type": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilterType" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.type`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilter", + "description": "Filter artifact collections. Only artifact collections matching all criteria will be returned" + }, + "ArtifactCollectionFilterFlowRunId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of flow run IDs to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilterFlowRunId", + "description": "Filter by `ArtifactCollection.flow_run_id`." + }, + "ArtifactCollectionFilterKey": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact keys to include" + }, + "like_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Like ", + "description": "A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.", + "examples": [ + "my-artifact-%" + ] + }, + "exists_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Exists ", + "description": "If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key. Should return all rows in the ArtifactCollection table if specified." 
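`ArtifactCollectionFilterKey` supports exact (`any_`), wildcard (`like_`), and presence (`exists_`) matching. A minimal sketch posting a `like_` filter to `/artifacts/latest/filter`; the wrapper body's field names (`artifacts`, `limit`) follow Prefect's usual filter-body convention and are assumptions here, since `Body_read_latest_artifacts_artifacts_latest_filter_post` is not expanded in this hunk:

```python
# Sketch only: latest artifacts whose keys match a SQL-style pattern.
import httpx

filter_body = {
    "artifacts": {                          # assumed wrapper field name
        "operator": "and_",                 # the default, shown for clarity
        "key": {"like_": "my-artifact-%"},  # % and _ are SQL wildcards
    },
    "limit": 10,                            # assumed wrapper field name
}

with httpx.Client(base_url="http://127.0.0.1:4200/api") as client:
    resp = client.post("/artifacts/latest/filter", json=filter_body)
    resp.raise_for_status()
    for artifact in resp.json():  # list[ArtifactCollection]
        print(artifact["key"], artifact["latest_id"])
```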
+ } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilterKey", + "description": "Filter by `ArtifactCollection.key`." + }, + "ArtifactCollectionFilterLatestId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilterLatestId", + "description": "Filter by `ArtifactCollection.latest_id`." + }, + "ArtifactCollectionFilterTaskRunId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of task run IDs to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilterTaskRunId", + "description": "Filter by `ArtifactCollection.task_run_id`." + }, + "ArtifactCollectionFilterType": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact types to include" + }, + "not_any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Not Any ", + "description": "A list of artifact types to exclude" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCollectionFilterType", + "description": "Filter by `ArtifactCollection.type`." + }, + "ArtifactCollectionSort": { + "type": "string", + "enum": [ + "CREATED_DESC", + "UPDATED_DESC", + "ID_DESC", + "KEY_DESC", + "KEY_ASC" + ], + "title": "ArtifactCollectionSort", + "description": "Defines artifact collection sorting options." + }, + "ArtifactCreate": { + "properties": { + "key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Key", + "description": "An optional unique reference key for this artifact." + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Type", + "description": "An identifier that describes the shape of the data field. e.g. 'result', 'table', 'markdown'" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A markdown-enabled description of the artifact." + }, + "data": { + "anyOf": [ + { + "type": "object" + }, + {}, + { + "type": "null" + } + ], + "title": "Data", + "description": "Data associated with the artifact, e.g. a result.; structure depends on the artifact type." + }, + "metadata_": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Metadata ", + "description": "User-defined artifact metadata. Content must be string key and value pairs." + }, + "flow_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Flow Run Id", + "description": "The flow run associated with the artifact." + }, + "task_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Task Run Id", + "description": "The task run associated with the artifact." 
+ } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactCreate", + "description": "Data used by the Prefect REST API to create an artifact." + }, + "ArtifactFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactFilterId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.id`" + }, + "key": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactFilterKey" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.key`" + }, + "flow_run_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactFilterFlowRunId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.flow_run_id`" + }, + "task_run_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactFilterTaskRunId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.task_run_id`" + }, + "type": { + "anyOf": [ + { + "$ref": "#/components/schemas/ArtifactFilterType" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Artifact.type`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilter", + "description": "Filter artifacts. Only artifacts matching all criteria will be returned" + }, + "ArtifactFilterFlowRunId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of flow run IDs to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilterFlowRunId", + "description": "Filter by `Artifact.flow_run_id`." + }, + "ArtifactFilterId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilterId", + "description": "Filter by `Artifact.id`." + }, + "ArtifactFilterKey": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact keys to include" + }, + "like_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Like ", + "description": "A string to match artifact keys against. This can include SQL wildcard characters like `%` and `_`.", + "examples": [ + "my-artifact-%" + ] + }, + "exists_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Exists ", + "description": "If `true`, only include artifacts with a non-null key. If `false`, only include artifacts with a null key." + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilterKey", + "description": "Filter by `Artifact.key`." 
+ }, + "ArtifactFilterTaskRunId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of task run IDs to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilterTaskRunId", + "description": "Filter by `Artifact.task_run_id`." + }, + "ArtifactFilterType": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of artifact types to include" + }, + "not_any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Not Any ", + "description": "A list of artifact types to exclude" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactFilterType", + "description": "Filter by `Artifact.type`." + }, + "ArtifactSort": { + "type": "string", + "enum": [ + "CREATED_DESC", + "UPDATED_DESC", + "ID_DESC", + "KEY_DESC", + "KEY_ASC" + ], + "title": "ArtifactSort", + "description": "Defines artifact sorting options." + }, + "ArtifactUpdate": { + "properties": { + "data": { + "anyOf": [ + { + "type": "object" + }, + {}, + { + "type": "null" + } + ], + "title": "Data" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "metadata_": { + "anyOf": [ + { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Metadata " + } + }, + "additionalProperties": false, + "type": "object", + "title": "ArtifactUpdate", + "description": "Data used by the Prefect REST API to update an artifact." 
+ }, + "Automation": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of this automation" + }, + "description": { + "type": "string", + "title": "Description", + "description": "A longer description of this automation", + "default": "" + }, + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this automation will be evaluated", + "default": true + }, + "trigger": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventTrigger" + }, + { + "$ref": "#/components/schemas/CompoundTrigger-Output" + }, + { + "$ref": "#/components/schemas/SequenceTrigger-Output" + } + ], + "title": "Trigger", + "description": "The criteria for which events this Automation covers and how it will respond to the presence or absence of those events" + }, + "actions": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions", + "description": "The actions to perform when this Automation triggers" + }, + "actions_on_trigger": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Trigger", + "description": "The actions to perform when an Automation goes into a triggered state" + }, + "actions_on_resolve": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": 
"#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Resolve", + "description": "The actions to perform when an Automation goes into a resolving state" + }, + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + } + }, + "type": "object", + "required": [ + "name", + "trigger", + "actions" + ], + "title": "Automation" + }, + "AutomationCreate": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of this automation" + }, + "description": { + "type": "string", + "title": "Description", + "description": "A longer description of this automation", + "default": "" + }, + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this automation will be evaluated", + "default": true + }, + "trigger": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventTrigger" + }, + { + "$ref": "#/components/schemas/CompoundTrigger-Input" + }, + { + "$ref": "#/components/schemas/SequenceTrigger-Input" + } + ], + "title": "Trigger", + "description": "The criteria for which events this Automation covers and how it will respond to the presence or absence of those events" + }, + "actions": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions", + "description": "The actions to perform when this Automation triggers" + }, + "actions_on_trigger": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": 
"#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Trigger", + "description": "The actions to perform when an Automation goes into a triggered state" + }, + "actions_on_resolve": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Resolve", + "description": "The actions to perform when an Automation goes into a resolving state" + }, + "owner_resource": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Owner Resource", + "description": "The resource to which this automation belongs" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "trigger", + "actions" + ], + "title": "AutomationCreate" + }, + "AutomationFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "name": { + "anyOf": [ + { + "$ref": "#/components/schemas/AutomationFilterName" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Automation.name`" + }, + "created": { + "anyOf": [ + { + "$ref": "#/components/schemas/AutomationFilterCreated" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Automation.created`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "AutomationFilter" + }, + "AutomationFilterCreated": { + "properties": { + "before_": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Before ", + "description": "Only include automations created before this datetime" + } + }, + "additionalProperties": false, + "type": "object", + "title": "AutomationFilterCreated", + "description": "Filter by `Automation.created`." + }, + "AutomationFilterName": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "Only include automations with names that match any of these strings" + } + }, + "additionalProperties": false, + "type": "object", + "title": "AutomationFilterName", + "description": "Filter by `Automation.created`." 
+ }, + "AutomationPartialUpdate": { + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this automation will be evaluated", + "default": true + } + }, + "additionalProperties": false, + "type": "object", + "title": "AutomationPartialUpdate" + }, + "AutomationSort": { + "type": "string", + "enum": [ + "CREATED_DESC", + "UPDATED_DESC", + "NAME_ASC", + "NAME_DESC" + ], + "title": "AutomationSort", + "description": "Defines automations sorting options." + }, + "AutomationUpdate": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of this automation" + }, + "description": { + "type": "string", + "title": "Description", + "description": "A longer description of this automation", + "default": "" + }, + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this automation will be evaluated", + "default": true + }, + "trigger": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventTrigger" + }, + { + "$ref": "#/components/schemas/CompoundTrigger-Input" + }, + { + "$ref": "#/components/schemas/SequenceTrigger-Input" + } + ], + "title": "Trigger", + "description": "The criteria for which events this Automation covers and how it will respond to the presence or absence of those events" + }, + "actions": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions", + "description": "The actions to perform when this Automation triggers" + }, + "actions_on_trigger": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": "#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Trigger", + "description": "The actions to perform when an Automation goes into a triggered state" + }, + "actions_on_resolve": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/DoNothing" + }, + { + "$ref": 
"#/components/schemas/RunDeployment" + }, + { + "$ref": "#/components/schemas/PauseDeployment" + }, + { + "$ref": "#/components/schemas/ResumeDeployment" + }, + { + "$ref": "#/components/schemas/CancelFlowRun" + }, + { + "$ref": "#/components/schemas/ChangeFlowRunState" + }, + { + "$ref": "#/components/schemas/PauseWorkQueue" + }, + { + "$ref": "#/components/schemas/ResumeWorkQueue" + }, + { + "$ref": "#/components/schemas/SendNotification" + }, + { + "$ref": "#/components/schemas/CallWebhook" + }, + { + "$ref": "#/components/schemas/PauseAutomation" + }, + { + "$ref": "#/components/schemas/ResumeAutomation" + }, + { + "$ref": "#/components/schemas/SuspendFlowRun" + }, + { + "$ref": "#/components/schemas/PauseWorkPool" + }, + { + "$ref": "#/components/schemas/ResumeWorkPool" + } + ] + }, + "type": "array", + "title": "Actions On Resolve", + "description": "The actions to perform when an Automation goes into a resolving state" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "trigger", + "actions" + ], + "title": "AutomationUpdate" + }, + "BlockDocument": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "name": { + "anyOf": [ + { + "type": "string", + "pattern": "^[^/%&><]+$" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "The block document's name. Not required for anonymous block documents." + }, + "data": { + "type": "object", + "title": "Data", + "description": "The block document's data" + }, + "block_schema_id": { + "type": "string", + "format": "uuid", + "title": "Block Schema Id", + "description": "A block schema ID" + }, + "block_schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchema" + }, + { + "type": "null" + } + ], + "description": "The associated block schema" + }, + "block_type_id": { + "type": "string", + "format": "uuid", + "title": "Block Type Id", + "description": "A block type ID" + }, + "block_type_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Block Type Name", + "description": "The associated block type's name" + }, + "block_type": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockType" + }, + { + "type": "null" + } + ], + "description": "The associated block type" + }, + "block_document_references": { + "additionalProperties": { + "type": "object" + }, + "type": "object", + "title": "Block Document References", + "description": "Record of the block document's references" + }, + "is_anonymous": { + "type": "boolean", + "title": "Is Anonymous", + "description": "Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically)", + "default": false + } + }, + "type": "object", + "required": [ + "block_schema_id", + "block_type_id" + ], + "title": "BlockDocument", + "description": "An ORM representation of a block document." + }, + "BlockDocumentCreate": { + "properties": { + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "The block document's name. Not required for anonymous block documents." 
+ }, + "data": { + "type": "object", + "title": "Data", + "description": "The block document's data" + }, + "block_schema_id": { + "type": "string", + "format": "uuid", + "title": "Block Schema Id", + "description": "A block schema ID" + }, + "block_type_id": { + "type": "string", + "format": "uuid", + "title": "Block Type Id", + "description": "A block type ID" + }, + "is_anonymous": { + "type": "boolean", + "title": "Is Anonymous", + "description": "Whether the block is anonymous (anonymous blocks are usually created by Prefect automatically)", + "default": false + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "block_schema_id", + "block_type_id" + ], + "title": "BlockDocumentCreate", + "description": "Data used by the Prefect REST API to create a block document." + }, + "BlockDocumentFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "id": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilterId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockDocument.id`" + }, + "is_anonymous": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilterIsAnonymous" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockDocument.is_anonymous`. Defaults to excluding anonymous blocks.", + "default": { + "eq_": false + } + }, + "block_type_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilterBlockTypeId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockDocument.block_type_id`" + }, + "name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilterName" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockDocument.name`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentFilter", + "description": "Filter BlockDocuments. Only BlockDocuments matching all criteria will be returned" + }, + "BlockDocumentFilterBlockTypeId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of block type ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentFilterBlockTypeId", + "description": "Filter by `BlockDocument.block_type_id`." + }, + "BlockDocumentFilterId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of block ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentFilterId", + "description": "Filter by `BlockDocument.id`." + }, + "BlockDocumentFilterIsAnonymous": { + "properties": { + "eq_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Eq ", + "description": "Filter block documents for only those that are or are not anonymous." + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentFilterIsAnonymous", + "description": "Filter by `BlockDocument.is_anonymous`." 
+ }, + "BlockDocumentFilterName": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of block names to include" + }, + "like_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Like ", + "description": "A string to match block names against. This can include SQL wildcard characters like `%` and `_`.", + "examples": [ + "my-block%" + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentFilterName", + "description": "Filter by `BlockDocument.name`." + }, + "BlockDocumentSort": { + "type": "string", + "enum": [ + "NAME_DESC", + "NAME_ASC", + "BLOCK_TYPE_AND_NAME_ASC" + ], + "title": "BlockDocumentSort", + "description": "Defines block document sorting options." + }, + "BlockDocumentUpdate": { + "properties": { + "block_schema_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Block Schema Id", + "description": "A block schema ID" + }, + "data": { + "type": "object", + "title": "Data", + "description": "The block document's data" + }, + "merge_existing_data": { + "type": "boolean", + "title": "Merge Existing Data", + "default": true + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockDocumentUpdate", + "description": "Data used by the Prefect REST API to update a block document." + }, + "BlockSchema": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "checksum": { + "type": "string", + "title": "Checksum", + "description": "The block schema's unique checksum" + }, + "fields": { + "type": "object", + "title": "Fields", + "description": "The block schema's field schema" + }, + "block_type_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Block Type Id", + "description": "A block type ID" + }, + "block_type": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockType" + }, + { + "type": "null" + } + ], + "description": "The associated block type" + }, + "capabilities": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Capabilities", + "description": "A list of Block capabilities" + }, + "version": { + "type": "string", + "title": "Version", + "description": "Human readable identifier for the block schema", + "default": "non-versioned" + } + }, + "type": "object", + "required": [ + "checksum", + "block_type_id" + ], + "title": "BlockSchema", + "description": "An ORM representation of a block schema." 
+ }, + "BlockSchemaCreate": { + "properties": { + "fields": { + "type": "object", + "title": "Fields", + "description": "The block schema's field schema" + }, + "block_type_id": { + "type": "string", + "format": "uuid", + "title": "Block Type Id", + "description": "A block type ID" + }, + "capabilities": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Capabilities", + "description": "A list of Block capabilities" + }, + "version": { + "type": "string", + "title": "Version", + "description": "Human readable identifier for the block schema", + "default": "non-versioned" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "block_type_id" + ], + "title": "BlockSchemaCreate", + "description": "Data used by the Prefect REST API to create a block schema." + }, + "BlockSchemaFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "block_type_id": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilterBlockTypeId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockSchema.block_type_id`" + }, + "block_capabilities": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilterCapabilities" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockSchema.capabilities`" + }, + "id": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilterId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockSchema.id`" + }, + "version": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilterVersion" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockSchema.version`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockSchemaFilter", + "description": "Filter BlockSchemas" + }, + "BlockSchemaFilterBlockTypeId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of block type ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockSchemaFilterBlockTypeId", + "description": "Filter by `BlockSchema.block_type_id`." + }, + "BlockSchemaFilterCapabilities": { + "properties": { + "all_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "All ", + "description": "A list of block capabilities. 
Block entities will be returned only if an associated block schema has a superset of the defined capabilities.", + "examples": [ + [ + "write-storage", + "read-storage" + ] + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockSchemaFilterCapabilities", + "description": "Filter by `BlockSchema.capabilities`" + }, + "BlockSchemaFilterId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of IDs to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockSchemaFilterId", + "description": "Filter by BlockSchema.id" + }, + "BlockSchemaFilterVersion": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of block schema versions.", + "examples": [ + [ + "2.0.0", + "2.1.0" + ] + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockSchemaFilterVersion", + "description": "Filter by `BlockSchema.capabilities`" + }, + "BlockType": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "name": { + "type": "string", + "pattern": "^[^/%&><]+$", + "title": "Name", + "description": "A block type's name" + }, + "slug": { + "type": "string", + "title": "Slug", + "description": "A block type's slug" + }, + "logo_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Logo Url", + "description": "Web URL for the block type's logo" + }, + "documentation_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Documentation Url", + "description": "Web URL for the block type's documentation" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A short blurb about the corresponding block's intended use" + }, + "code_example": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Code Example", + "description": "A code snippet demonstrating use of the corresponding block" + }, + "is_protected": { + "type": "boolean", + "title": "Is Protected", + "description": "Protected block types cannot be modified via API.", + "default": false + } + }, + "type": "object", + "required": [ + "name", + "slug" + ], + "title": "BlockType", + "description": "An ORM representation of a block type" + }, + "BlockTypeCreate": { + "properties": { + "name": { + "type": "string", + "pattern": "^[^/%&><]+$", + "title": "Name", + "description": "A block type's name" + }, + "slug": { + "type": "string", + "title": "Slug", + "description": "A block type's slug" + }, + "logo_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Logo Url", + "description": "Web URL for the block type's logo" + }, + "documentation_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Documentation Url", + "description": "Web URL for the block type's documentation" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + 
"type": "null" + } + ], + "title": "Description", + "description": "A short blurb about the corresponding block's intended use" + }, + "code_example": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Code Example", + "description": "A code snippet demonstrating use of the corresponding block" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "slug" + ], + "title": "BlockTypeCreate", + "description": "Data used by the Prefect REST API to create a block type." + }, + "BlockTypeFilter": { + "properties": { + "name": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockTypeFilterName" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockType.name`" + }, + "slug": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockTypeFilterSlug" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `BlockType.slug`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockTypeFilter", + "description": "Filter BlockTypes" + }, + "BlockTypeFilterName": { + "properties": { + "like_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Like ", + "description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.", + "examples": [ + "marvin" + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockTypeFilterName", + "description": "Filter by `BlockType.name`" + }, + "BlockTypeFilterSlug": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of slugs to match" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockTypeFilterSlug", + "description": "Filter by `BlockType.slug`" + }, + "BlockTypeUpdate": { + "properties": { + "logo_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Logo Url" + }, + "documentation_url": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Documentation Url" + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "code_example": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Code Example" + } + }, + "additionalProperties": false, + "type": "object", + "title": "BlockTypeUpdate", + "description": "Data used by the Prefect REST API to update a block type." 
+ }, + "Body_average_flow_run_lateness_flow_runs_lateness_post": { + "properties": { + "flows": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + }, + { + "type": "null" + } + ] + }, + "flow_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + }, + { + "type": "null" + } + ] + }, + "task_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + }, + { + "type": "null" + } + ] + }, + "deployments": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + }, + { + "type": "null" + } + ] + }, + "work_pools": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + }, + { + "type": "null" + } + ] + }, + "work_pool_queues": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Body_average_flow_run_lateness_flow_runs_lateness_post" + }, + "Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post": { + "properties": { + "slots": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Slots" + }, + "names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Names", + "min_items": 1 + }, + "occupancy_seconds": { + "anyOf": [ + { + "type": "number", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Occupancy Seconds" + } + }, + "type": "object", + "required": [ + "slots", + "names" + ], + "title": "Body_bulk_decrement_active_slots_v2_concurrency_limits_decrement_post" + }, + "Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post": { + "properties": { + "slots": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Slots" + }, + "names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Names", + "min_items": 1 + }, + "mode": { + "type": "string", + "enum": [ + "concurrency", + "rate_limit" + ], + "title": "Mode", + "default": "concurrency" + } + }, + "type": "object", + "required": [ + "slots", + "names" + ], + "title": "Body_bulk_increment_active_slots_v2_concurrency_limits_increment_post" + }, + "Body_clear_database_admin_database_clear_post": { + "properties": { + "confirm": { + "type": "boolean", + "title": "Confirm", + "description": "Pass confirm=True to confirm you want to modify the database.", + "default": false + } + }, + "type": "object", + "title": "Body_clear_database_admin_database_clear_post" + }, + "Body_count_account_events_events_count_by__countable__post": { + "properties": { + "filter": { + "$ref": "#/components/schemas/EventFilter" + }, + "time_unit": { + "allOf": [ + { + "$ref": "#/components/schemas/TimeUnit" + } + ], + "default": "day" + }, + "time_interval": { + "type": "number", + "minimum": 0.01, + "title": "Time Interval", + "default": 1.0 + } + }, + "type": "object", + "required": [ + "filter" + ], + "title": "Body_count_account_events_events_count_by__countable__post" + }, + "Body_count_artifacts_artifacts_count_post": { + "properties": { + "artifacts": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + } + }, + "type": "object", + "title": "Body_count_artifacts_artifacts_count_post" + }, + 
"Body_count_block_documents_block_documents_count_post": { + "properties": { + "block_documents": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilter" + }, + { + "type": "null" + } + ] + }, + "block_types": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockTypeFilter" + }, + { + "type": "null" + } + ] + }, + "block_schemas": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Body_count_block_documents_block_documents_count_post" + }, + "Body_count_deployments_by_flow_ui_flows_count_deployments_post": { + "properties": { + "flow_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Flow Ids", + "max_items": 200 + } + }, + "type": "object", + "required": [ + "flow_ids" + ], + "title": "Body_count_deployments_by_flow_ui_flows_count_deployments_post" + }, + "Body_count_deployments_deployments_count_post": { + "properties": { + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + }, + "work_pool_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + } + }, + "type": "object", + "title": "Body_count_deployments_deployments_count_post" + }, + "Body_count_flow_runs_flow_runs_count_post": { + "properties": { + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + }, + "work_pool_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + } + }, + "type": "object", + "title": "Body_count_flow_runs_flow_runs_count_post" + }, + "Body_count_flows_flows_count_post": { + "properties": { + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + } + }, + "type": "object", + "title": "Body_count_flows_flows_count_post" + }, + "Body_count_latest_artifacts_artifacts_latest_count_post": { + "properties": { + "artifacts": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + } + }, + 
"type": "object", + "title": "Body_count_latest_artifacts_artifacts_latest_count_post" + }, + "Body_count_task_runs_task_runs_count_post": { + "properties": { + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + } + }, + "type": "object", + "title": "Body_count_task_runs_task_runs_count_post" + }, + "Body_count_variables_variables_count_post": { + "properties": { + "variables": { + "anyOf": [ + { + "$ref": "#/components/schemas/VariableFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Body_count_variables_variables_count_post" + }, + "Body_count_work_pools_work_pools_count_post": { + "properties": { + "work_pools": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Body_count_work_pools_work_pools_count_post" + }, + "Body_create_database_admin_database_create_post": { + "properties": { + "confirm": { + "type": "boolean", + "title": "Confirm", + "description": "Pass confirm=True to confirm you want to modify the database.", + "default": false + } + }, + "type": "object", + "title": "Body_create_database_admin_database_create_post" + }, + "Body_create_flow_run_input_flow_runs__id__input_post": { + "properties": { + "key": { + "type": "string", + "title": "Key", + "description": "The input key" + }, + "value": { + "type": "string", + "format": "binary", + "title": "Value", + "description": "The value of the input" + }, + "sender": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Sender", + "description": "The sender of the input" + } + }, + "type": "object", + "required": [ + "key", + "value" + ], + "title": "Body_create_flow_run_input_flow_runs__id__input_post" + }, + "Body_drop_database_admin_database_drop_post": { + "properties": { + "confirm": { + "type": "boolean", + "title": "Confirm", + "description": "Pass confirm=True to confirm you want to modify the database.", + "default": false + } + }, + "type": "object", + "title": "Body_drop_database_admin_database_drop_post" + }, + "Body_filter_flow_run_input_flow_runs__id__input_filter_post": { + "properties": { + "prefix": { + "type": "string", + "title": "Prefix", + "description": "The input key prefix" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "The maximum number of results to return", + "default": 1 + }, + "exclude_keys": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Exclude Keys", + "description": "Exclude inputs with these keys", + "default": [] + } + }, + "type": "object", + "required": [ + "prefix" + ], + "title": "Body_filter_flow_run_input_flow_runs__id__input_filter_post" + }, + "Body_flow_run_history_flow_runs_history_post": { + "properties": { + "history_start": { + "type": "string", + "format": "date-time", + "title": "History Start", + "description": "The history's start time." + }, + "history_end": { + "type": "string", + "format": "date-time", + "title": "History End", + "description": "The history's end time." + }, + "history_interval": { + "type": "number", + "format": "time-delta", + "title": "History Interval", + "description": "The size of each history interval, in seconds. 
Must be at least 1 second." + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + }, + "work_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + } + }, + "type": "object", + "required": [ + "history_start", + "history_end", + "history_interval" + ], + "title": "Body_flow_run_history_flow_runs_history_post" + }, + "Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post": { + "properties": { + "deployment_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Deployment Ids", + "description": "The deployment IDs to get scheduled runs for" + }, + "scheduled_before": { + "type": "string", + "format": "date-time", + "title": "Scheduled Before", + "description": "The maximum time to look for scheduled flow runs" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "required": [ + "deployment_ids" + ], + "title": "Body_get_scheduled_flow_runs_for_deployments_deployments_get_scheduled_flow_runs_post" + }, + "Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post": { + "properties": { + "work_queue_names": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Work Queue Names", + "description": "The names of work pool queues" + }, + "scheduled_before": { + "type": "string", + "format": "date-time", + "title": "Scheduled Before", + "description": "The maximum time to look for scheduled flow runs" + }, + "scheduled_after": { + "type": "string", + "format": "date-time", + "title": "Scheduled After", + "description": "The minimum time to look for scheduled flow runs" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_get_scheduled_flow_runs_work_pools__name__get_scheduled_flow_runs_post" + }, + "Body_next_runs_by_flow_ui_flows_next_runs_post": { + "properties": { + "flow_ids": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Flow Ids", + "max_items": 200 + } + }, + "type": "object", + "required": [ + "flow_ids" + ], + "title": "Body_next_runs_by_flow_ui_flows_next_runs_post" + }, + "Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_all_concurrency_limits_v2_v2_concurrency_limits_filter_post" + }, + "Body_read_artifacts_artifacts_filter_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactSort" + } + ], + "default": "ID_DESC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "artifacts": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_artifacts_artifacts_filter_post" + }, + "Body_read_automations_automations_filter_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/AutomationSort" + } + ], + "default": "NAME_ASC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "automations": { + "anyOf": [ + { + "$ref": "#/components/schemas/AutomationFilter" + }, + { + "type": "null" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_automations_automations_filter_post" + }, + "Body_read_block_documents_block_documents_filter_post": { + "properties": { + "block_documents": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentFilter" + }, + { + "type": "null" + } + ] + }, + "block_types": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockTypeFilter" + }, + { + "type": "null" + } + ] + }, + "block_schemas": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilter" + }, + { + "type": "null" + } + ] + }, + "include_secrets": { + "type": "boolean", + "title": "Include Secrets", + "description": "Whether to include sensitive values in the block document.", + "default": false + }, + "sort": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockDocumentSort" + }, + { + "type": "null" + } + ], + "default": "NAME_ASC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_block_documents_block_documents_filter_post" + }, + "Body_read_block_schemas_block_schemas_filter_post": { + "properties": { + "block_schemas": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilter" + }, + { + "type": "null" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_block_schemas_block_schemas_filter_post" + }, + "Body_read_block_types_block_types_filter_post": { + "properties": { + "block_types": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockTypeFilter" + }, + { + "type": "null" + } + ] + }, + "block_schemas": { + "anyOf": [ + { + "$ref": "#/components/schemas/BlockSchemaFilter" + }, + { + "type": "null" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_block_types_block_types_filter_post" + }, + "Body_read_concurrency_limits_concurrency_limits_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_concurrency_limits_concurrency_limits_filter_post" + }, + "Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post": { + "properties": { + "task_runs": { + "$ref": "#/components/schemas/TaskRunFilter" + }, + "flows": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + }, + { + "type": "null" + } + ] + }, + "flow_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + }, + { + "type": "null" + } + ] + }, + "deployments": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + }, + { + "type": "null" + } + ] + }, + "work_pools": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + }, + { + "type": "null" + } + ] + }, + "work_queues": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "required": [ + "task_runs" + ], + "title": "Body_read_dashboard_task_run_counts_ui_task_runs_dashboard_counts_post" + }, + "Body_read_deployments_deployments_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + }, + "work_pool_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + }, + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentSort" + } + ], + "default": "NAME_ASC" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_deployments_deployments_filter_post" + }, + "Body_read_events_events_filter_post": { + "properties": { + "filter": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventFilter" + }, + { + "type": "null" + } + ], + "description": "Additional optional filter criteria to narrow down the set of Events" + }, + "limit": { + "type": "integer", + "maximum": 50.0, + "minimum": 0.0, + "title": "Limit", + "description": "The number of events to return with each page", + "default": 50 + } + }, + "type": "object", + "title": "Body_read_events_events_filter_post" + }, + "Body_read_flow_run_history_ui_flow_runs_history_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunSort" + } + ], + "default": "EXPECTED_START_TIME_DESC" + }, + "limit": { + "type": "integer", + "maximum": 1000.0, + "title": "Limit", + "default": 1000 + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + } + }, + "type": "object", + "title": "Body_read_flow_run_history_ui_flow_runs_history_post" + }, + "Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post": { + "properties": { + "flow_run_notification_policy_filter": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunNotificationPolicyFilter" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_flow_run_notification_policies_flow_run_notification_policies_filter_post" + }, + "Body_read_flow_runs_flow_runs_filter_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunSort" + } + ], + "default": "ID_DESC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "flows": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + }, + { + "type": "null" + } + ] + }, + "flow_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + }, + { + "type": "null" + } + ] + }, + "task_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + }, + { + "type": "null" + } + ] + }, + "deployments": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + }, + { + "type": "null" + } + ] + }, + "work_pools": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + }, + { + "type": "null" + } + ] + }, + "work_pool_queues": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + }, + { + "type": "null" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_flow_runs_flow_runs_filter_post" + }, + "Body_read_flows_flows_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "work_pools": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + } + ] + }, + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowSort" + } + ], + "default": "NAME_ASC" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_flows_flows_filter_post" + }, + "Body_read_latest_artifacts_artifacts_latest_filter_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionSort" + } + ], + "default": "ID_DESC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "artifacts": { + "allOf": [ + { + "$ref": "#/components/schemas/ArtifactCollectionFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_latest_artifacts_artifacts_latest_filter_post" + }, + "Body_read_logs_logs_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "logs": { + "allOf": [ + { + "$ref": "#/components/schemas/LogFilter" + } + ] + }, + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/LogSort" + } + ], + "default": "TIMESTAMP_ASC" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_logs_logs_filter_post" + }, + "Body_read_saved_searches_saved_searches_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_saved_searches_saved_searches_filter_post" + }, + "Body_read_task_run_counts_by_state_ui_task_runs_count_post": { + "properties": { + "flows": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + }, + { + "type": "null" + } + ] + }, + "flow_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + }, + { + "type": "null" + } + ] + }, + "task_runs": { + "anyOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + }, + { + "type": "null" + } + ] + }, + "deployments": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + }, + { + "type": "null" + } + ] + } + }, + "type": "object", + "title": "Body_read_task_run_counts_by_state_ui_task_runs_count_post" + }, + "Body_read_task_runs_task_runs_filter_post": { + "properties": { + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunSort" + } + ], + "default": "ID_DESC" + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_task_runs_task_runs_filter_post" + }, + "Body_read_variables_variables_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "variables": { + "anyOf": [ + { + "$ref": "#/components/schemas/VariableFilter" + }, + { + "type": "null" + } + ] + }, + "sort": { + "allOf": [ + { + "$ref": "#/components/schemas/VariableSort" + } + ], + "default": "NAME_ASC" + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_variables_variables_filter_post" + }, + "Body_read_work_pools_work_pools_filter_post": { + "properties": { + "work_pools": { + "anyOf": [ + { + "$ref": "#/components/schemas/WorkPoolFilter" + }, + { + "type": "null" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_work_pools_work_pools_filter_post" + }, + "Body_read_work_queue_runs_work_queues__id__get_runs_post": { + "properties": { + "scheduled_before": { + "type": "string", + "format": "date-time", + "title": "Scheduled Before", + "description": "Only flow runs scheduled to start before this time will be returned." + }, + "agent_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Agent Id", + "description": "An optional unique identifier for the agent making this query. If provided, the Prefect REST API will track the last time this agent polled the work queue." + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." 
+ } + }, + "type": "object", + "title": "Body_read_work_queue_runs_work_queues__id__get_runs_post" + }, + "Body_read_work_queues_work_pools__work_pool_name__queues_filter_post": { + "properties": { + "work_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_work_queues_work_pools__work_pool_name__queues_filter_post" + }, + "Body_read_work_queues_work_queues_filter_post": { + "properties": { + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "work_queues": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkQueueFilter" + } + ] + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_work_queues_work_queues_filter_post" + }, + "Body_read_workers_work_pools__work_pool_name__workers_filter_post": { + "properties": { + "workers": { + "allOf": [ + { + "$ref": "#/components/schemas/WorkerFilter" + } + ] + }, + "offset": { + "type": "integer", + "minimum": 0.0, + "title": "Offset", + "default": 0 + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "Defaults to PREFECT_API_DEFAULT_LIMIT if not provided." + } + }, + "type": "object", + "title": "Body_read_workers_work_pools__work_pool_name__workers_filter_post" + }, + "Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post": { + "properties": { + "slot_override": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Slot Override", + "description": "Manual override for active concurrency limit slots." + } + }, + "type": "object", + "title": "Body_reset_concurrency_limit_by_tag_concurrency_limits_tag__tag__reset_post" + }, + "Body_resume_flow_run_flow_runs__id__resume_post": { + "properties": { + "run_input": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Run Input" + } + }, + "type": "object", + "title": "Body_resume_flow_run_flow_runs__id__resume_post" + }, + "Body_schedule_deployment_deployments__id__schedule_post": { + "properties": { + "start_time": { + "type": "string", + "format": "date-time", + "title": "Start Time", + "description": "The earliest date to schedule" + }, + "end_time": { + "type": "string", + "format": "date-time", + "title": "End Time", + "description": "The latest date to schedule" + }, + "min_time": { + "type": "number", + "format": "time-delta", + "title": "Min Time", + "description": "Runs will be scheduled until at least this long after the `start_time`" + }, + "min_runs": { + "type": "integer", + "title": "Min Runs", + "description": "The minimum number of runs to schedule" + }, + "max_runs": { + "type": "integer", + "title": "Max Runs", + "description": "The maximum number of runs to schedule" + } + }, + "type": "object", + "title": "Body_schedule_deployment_deployments__id__schedule_post" + }, + "Body_set_flow_run_state_flow_runs__id__set_state_post": { + "properties": { + "state": { + "allOf": [ + { + "$ref": "#/components/schemas/StateCreate" + } + ], + "description": "The intended state." 
+ }, + "force": { + "type": "boolean", + "title": "Force", + "description": "If false, orchestration rules will be applied that may alter or prevent the state transition. If True, orchestration rules are not applied.", + "default": false + } + }, + "type": "object", + "required": [ + "state" + ], + "title": "Body_set_flow_run_state_flow_runs__id__set_state_post" + }, + "Body_set_task_run_state_task_runs__id__set_state_post": { + "properties": { + "state": { + "allOf": [ + { + "$ref": "#/components/schemas/StateCreate" + } + ], + "description": "The intended state." + }, + "force": { + "type": "boolean", + "title": "Force", + "description": "If false, orchestration rules will be applied that may alter or prevent the state transition. If True, orchestration rules are not applied.", + "default": false + } + }, + "type": "object", + "required": [ + "state" + ], + "title": "Body_set_task_run_state_task_runs__id__set_state_post" + }, + "Body_task_run_history_task_runs_history_post": { + "properties": { + "history_start": { + "type": "string", + "format": "date-time", + "title": "History Start", + "description": "The history's start time." + }, + "history_end": { + "type": "string", + "format": "date-time", + "title": "History End", + "description": "The history's end time." + }, + "history_interval": { + "type": "number", + "format": "time-delta", + "title": "History Interval", + "description": "The size of each history interval, in seconds. Must be at least 1 second." + }, + "flows": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowFilter" + } + ] + }, + "flow_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunFilter" + } + ] + }, + "task_runs": { + "allOf": [ + { + "$ref": "#/components/schemas/TaskRunFilter" + } + ] + }, + "deployments": { + "allOf": [ + { + "$ref": "#/components/schemas/DeploymentFilter" + } + ] + } + }, + "type": "object", + "required": [ + "history_start", + "history_end", + "history_interval" + ], + "title": "Body_task_run_history_task_runs_history_post" + }, + "Body_validate_obj_ui_schemas_validate_post": { + "properties": { + "json_schema": { + "type": "object", + "title": "Json Schema" + }, + "values": { + "type": "object", + "title": "Values" + } + }, + "type": "object", + "required": [ + "json_schema", + "values" + ], + "title": "Body_validate_obj_ui_schemas_validate_post" + }, + "Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The worker process name" + }, + "heartbeat_interval_seconds": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "title": "Heartbeat Interval Seconds", + "description": "The worker's heartbeat interval in seconds" + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "Body_worker_heartbeat_work_pools__work_pool_name__workers_heartbeat_post" + }, + "CallWebhook": { + "properties": { + "type": { + "type": "string", + "enum": [ + "call-webhook" + ], + "const": "call-webhook", + "title": "Type", + "default": "call-webhook" + }, + "block_document_id": { + "type": "string", + "format": "uuid", + "title": "Block Document Id", + "description": "The identifier of the webhook block to use" + }, + "payload": { + "type": "string", + "title": "Payload", + "description": "An optional templatable payload to send when calling the webhook.", + "default": "" + } + }, + "type": "object", + "required": [ + "block_document_id" + ], + "title": "CallWebhook", + "description": "Call a webhook 
when an Automation is triggered." + }, + "CancelFlowRun": { + "properties": { + "type": { + "type": "string", + "enum": [ + "cancel-flow-run" + ], + "const": "cancel-flow-run", + "title": "Type", + "default": "cancel-flow-run" + } + }, + "type": "object", + "title": "CancelFlowRun", + "description": "Cancels a flow run associated with the trigger" + }, + "ChangeFlowRunState": { + "properties": { + "type": { + "type": "string", + "enum": [ + "change-flow-run-state" + ], + "const": "change-flow-run-state", + "title": "Type", + "default": "change-flow-run-state" + }, + "name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "The name of the state to change the flow run to" + }, + "state": { + "allOf": [ + { + "$ref": "#/components/schemas/StateType" + } + ], + "description": "The type of the state to change the flow run to" + }, + "message": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Message", + "description": "An optional message to associate with the state change" + } + }, + "type": "object", + "required": [ + "state" + ], + "title": "ChangeFlowRunState", + "description": "Changes the state of a flow run associated with the trigger" + }, + "CompoundTrigger-Input": { + "properties": { + "type": { + "type": "string", + "enum": [ + "compound" + ], + "const": "compound", + "title": "Type", + "default": "compound" + }, + "id": { + "type": "string", + "format": "uuid", + "title": "Id", + "description": "The unique ID of this trigger" + }, + "triggers": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventTrigger" + }, + { + "$ref": "#/components/schemas/CompoundTrigger-Input" + }, + { + "$ref": "#/components/schemas/SequenceTrigger-Input" + } + ] + }, + "type": "array", + "title": "Triggers" + }, + "within": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Within" + }, + "require": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string", + "enum": [ + "any", + "all" + ] + } + ], + "title": "Require" + } + }, + "type": "object", + "required": [ + "triggers", + "within", + "require" + ], + "title": "CompoundTrigger", + "description": "A composite trigger that requires some number of triggers to have\nfired within the given time period" + }, + "CompoundTrigger-Output": { + "properties": { + "type": { + "type": "string", + "enum": [ + "compound" + ], + "const": "compound", + "title": "Type", + "default": "compound" + }, + "id": { + "type": "string", + "format": "uuid", + "title": "Id", + "description": "The unique ID of this trigger" + }, + "triggers": { + "items": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventTrigger" + }, + { + "$ref": "#/components/schemas/CompoundTrigger-Output" + }, + { + "$ref": "#/components/schemas/SequenceTrigger-Output" + } + ] + }, + "type": "array", + "title": "Triggers" + }, + "within": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Within" + }, + "require": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "string", + "enum": [ + "any", + "all" + ] + } + ], + "title": "Require" + } + }, + "type": "object", + "required": [ + "triggers", + "within", + "require" + ], + "title": "CompoundTrigger", + "description": "A composite trigger that requires some number of triggers to have\nfired within the given time period" + }, + "ConcurrencyLimit": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + 
"anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "tag": { + "type": "string", + "title": "Tag", + "description": "A tag the concurrency limit is applied to." + }, + "concurrency_limit": { + "type": "integer", + "title": "Concurrency Limit", + "description": "The concurrency limit." + }, + "active_slots": { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array", + "title": "Active Slots", + "description": "A list of active run ids using a concurrency slot" + } + }, + "type": "object", + "required": [ + "tag", + "concurrency_limit" + ], + "title": "ConcurrencyLimit", + "description": "An ORM representation of a concurrency limit." + }, + "ConcurrencyLimitCreate": { + "properties": { + "tag": { + "type": "string", + "title": "Tag", + "description": "A tag the concurrency limit is applied to." + }, + "concurrency_limit": { + "type": "integer", + "title": "Concurrency Limit", + "description": "The concurrency limit." + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "tag", + "concurrency_limit" + ], + "title": "ConcurrencyLimitCreate", + "description": "Data used by the Prefect REST API to create a concurrency limit." + }, + "ConcurrencyLimitV2": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "active": { + "type": "boolean", + "title": "Active", + "description": "Whether the concurrency limit is active.", + "default": true + }, + "name": { + "type": "string", + "pattern": "^[^/%&><]+$", + "title": "Name", + "description": "The name of the concurrency limit." + }, + "limit": { + "type": "integer", + "title": "Limit", + "description": "The concurrency limit." + }, + "active_slots": { + "type": "integer", + "title": "Active Slots", + "description": "The number of active slots.", + "default": 0 + }, + "denied_slots": { + "type": "integer", + "title": "Denied Slots", + "description": "The number of denied slots.", + "default": 0 + }, + "slot_decay_per_second": { + "type": "number", + "title": "Slot Decay Per Second", + "description": "The decay rate for active slots when used as a rate limit.", + "default": 0 + }, + "avg_slot_occupancy_seconds": { + "type": "number", + "title": "Avg Slot Occupancy Seconds", + "description": "The average amount of time a slot is occupied.", + "default": 2.0 + } + }, + "type": "object", + "required": [ + "name", + "limit" + ], + "title": "ConcurrencyLimitV2", + "description": "An ORM representation of a v2 concurrency limit." + }, + "ConcurrencyLimitV2Create": { + "properties": { + "active": { + "type": "boolean", + "title": "Active", + "description": "Whether the concurrency limit is active.", + "default": true + }, + "name": { + "type": "string", + "pattern": "^[^/%&><]+$", + "title": "Name", + "description": "The name of the concurrency limit." + }, + "limit": { + "type": "integer", + "minimum": 0.0, + "title": "Limit", + "description": "The concurrency limit." 
+ }, + "active_slots": { + "type": "integer", + "minimum": 0.0, + "title": "Active Slots", + "description": "The number of active slots.", + "default": 0 + }, + "denied_slots": { + "type": "integer", + "minimum": 0.0, + "title": "Denied Slots", + "description": "The number of denied slots.", + "default": 0 + }, + "slot_decay_per_second": { + "type": "number", + "minimum": 0.0, + "title": "Slot Decay Per Second", + "description": "The decay rate for active slots when used as a rate limit.", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "limit" + ], + "title": "ConcurrencyLimitV2Create", + "description": "Data used by the Prefect REST API to create a v2 concurrency limit." + }, + "ConcurrencyLimitV2Update": { + "properties": { + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Active" + }, + "name": { + "anyOf": [ + { + "type": "string", + "pattern": "^[^/%&><]+$" + }, + { + "type": "null" + } + ], + "title": "Name" + }, + "limit": { + "anyOf": [ + { + "type": "integer", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Limit" + }, + "active_slots": { + "anyOf": [ + { + "type": "integer", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Active Slots" + }, + "denied_slots": { + "anyOf": [ + { + "type": "integer", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Denied Slots" + }, + "slot_decay_per_second": { + "anyOf": [ + { + "type": "number", + "minimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Slot Decay Per Second" + } + }, + "additionalProperties": false, + "type": "object", + "title": "ConcurrencyLimitV2Update", + "description": "Data used by the Prefect REST API to update a v2 concurrency limit." + }, + "Constant": { + "properties": { + "input_type": { + "type": "string", + "enum": [ + "constant" + ], + "const": "constant", + "title": "Input Type", + "default": "constant" + }, + "type": { + "type": "string", + "title": "Type" + } + }, + "type": "object", + "required": [ + "type" + ], + "title": "Constant", + "description": "Represents constant input value to a task run." + }, + "CountByState": { + "properties": { + "COMPLETED": { + "type": "integer", + "title": "Completed", + "default": 0 + }, + "PENDING": { + "type": "integer", + "title": "Pending", + "default": 0 + }, + "RUNNING": { + "type": "integer", + "title": "Running", + "default": 0 + }, + "FAILED": { + "type": "integer", + "title": "Failed", + "default": 0 + }, + "CANCELLED": { + "type": "integer", + "title": "Cancelled", + "default": 0 + }, + "CRASHED": { + "type": "integer", + "title": "Crashed", + "default": 0 + }, + "PAUSED": { + "type": "integer", + "title": "Paused", + "default": 0 + }, + "CANCELLING": { + "type": "integer", + "title": "Cancelling", + "default": 0 + }, + "SCHEDULED": { + "type": "integer", + "title": "Scheduled", + "default": 0 + } + }, + "type": "object", + "title": "CountByState" + }, + "Countable": { + "type": "string", + "enum": [ + "day", + "time", + "event", + "resource" + ], + "title": "Countable" + }, + "CreatedBy": { + "properties": { + "id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Id", + "description": "The id of the creator of the object." + }, + "type": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Type", + "description": "The type of the creator of the object." 
+ }, + "display_value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Display Value", + "description": "The display value for the creator." + } + }, + "type": "object", + "title": "CreatedBy" + }, + "CronSchedule": { + "properties": { + "cron": { + "type": "string", + "title": "Cron", + "examples": [ + "0 0 * * *" + ] + }, + "timezone": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Timezone", + "examples": [ + "America/New_York" + ] + }, + "day_or": { + "type": "boolean", + "title": "Day Or", + "description": "Control croniter behavior for handling day and day_of_week entries.", + "default": true + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "cron" + ], + "title": "CronSchedule", + "description": "Cron schedule\n\nNOTE: If the timezone is a DST-observing one, then the schedule will adjust\nitself appropriately. Cron's rules for DST are based on schedule times, not\nintervals. This means that an hourly cron schedule will fire on every new\nschedule hour, not every elapsed hour; for example, when clocks are set back\nthis will result in a two-hour pause as the schedule will fire *the first\ntime* 1am is reached and *the first time* 2am is reached, 120 minutes later.\nLonger schedules, such as one that fires at 9am every morning, will\nautomatically adjust for DST.\n\nArgs:\n cron (str): a valid cron string\n timezone (str): a valid timezone string in IANA tzdata format (for example,\n America/New_York).\n day_or (bool, optional): Control how croniter handles `day` and `day_of_week`\n entries. Defaults to True, matching cron which connects those values using\n OR. If the switch is set to False, the values are connected using AND. This\n behaves like fcron and enables you to e.g. define a job that executes each\n 2nd friday of a month by setting the days of month and the weekday." 
+ }, + "CsrfToken": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "token": { + "type": "string", + "title": "Token", + "description": "The CSRF token" + }, + "client": { + "type": "string", + "title": "Client", + "description": "The client id associated with the CSRF token" + }, + "expiration": { + "type": "string", + "format": "date-time", + "title": "Expiration", + "description": "The expiration time of the CSRF token" + } + }, + "type": "object", + "required": [ + "token", + "client", + "expiration" + ], + "title": "CsrfToken" + }, + "DependencyResult": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "upstream_dependencies": { + "items": { + "$ref": "#/components/schemas/TaskRunResult" + }, + "type": "array", + "title": "Upstream Dependencies" + }, + "state": { + "$ref": "#/components/schemas/State" + }, + "expected_start_time": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Expected Start Time" + }, + "start_time": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Start Time" + }, + "end_time": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "End Time" + }, + "total_run_time": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Total Run Time" + }, + "estimated_run_time": { + "anyOf": [ + { + "type": "number" + }, + { + "type": "null" + } + ], + "title": "Estimated Run Time" + }, + "untrackable_result": { + "type": "boolean", + "title": "Untrackable Result" + } + }, + "type": "object", + "required": [ + "id", + "name", + "upstream_dependencies", + "state", + "expected_start_time", + "start_time", + "end_time", + "total_run_time", + "estimated_run_time", + "untrackable_result" + ], + "title": "DependencyResult" + }, + "DeploymentCreate": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the deployment.", + "examples": [ + "my-deployment" + ] + }, + "flow_id": { + "type": "string", + "format": "uuid", + "title": "Flow Id", + "description": "The ID of the flow associated with the deployment." + }, + "is_schedule_active": { + "type": "boolean", + "title": "Is Schedule Active", + "description": "Whether the schedule is active.", + "default": true + }, + "paused": { + "type": "boolean", + "title": "Paused", + "description": "Whether or not the deployment is paused.", + "default": false + }, + "schedules": { + "items": { + "$ref": "#/components/schemas/DeploymentScheduleCreate" + }, + "type": "array", + "title": "Schedules", + "description": "A list of schedules for the deployment." + }, + "enforce_parameter_schema": { + "type": "boolean", + "title": "Enforce Parameter Schema", + "description": "Whether or not the deployment should enforce the parameter schema.", + "default": true + }, + "parameter_openapi_schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Parameter Openapi Schema", + "description": "The parameter schema of the flow, including defaults." 
+ }, + "parameters": { + "type": "object", + "title": "Parameters", + "description": "Parameters for flow runs scheduled by the deployment." + }, + "tags": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Tags", + "description": "A list of deployment tags.", + "examples": [ + [ + "tag-1", + "tag-2" + ] + ] + }, + "pull_steps": { + "anyOf": [ + { + "items": { + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Pull Steps" + }, + "manifest_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Manifest Path" + }, + "work_queue_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Queue Name" + }, + "work_pool_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Pool Name", + "description": "The name of the deployment's work pool.", + "examples": [ + "my-work-pool" + ] + }, + "storage_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Storage Document Id" + }, + "infrastructure_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Infrastructure Document Id" + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + }, + { + "type": "null" + } + ], + "title": "Schedule", + "description": "The schedule for the deployment." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Path" + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version" + }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Entrypoint" + }, + "job_variables": { + "type": "object", + "title": "Job Variables", + "description": "Overrides for the flow's infrastructure configuration." + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "name", + "flow_id" + ], + "title": "DeploymentCreate", + "description": "Data used by the Prefect REST API to create a deployment." + }, + "DeploymentFilter": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. 
Defaults to 'and_'.", + "default": "and_" + }, + "id": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterId" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.id`" + }, + "name": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterName" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.name`" + }, + "paused": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterPaused" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.paused`" + }, + "is_schedule_active": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterIsScheduleActive" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.is_schedule_active`" + }, + "tags": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterTags" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.tags`" + }, + "work_queue_name": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentFilterWorkQueueName" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for `Deployment.work_queue_name`" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilter", + "description": "Filter for deployments. Only deployments matching all criteria will be returned." + }, + "DeploymentFilterId": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of deployment ids to include" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterId", + "description": "Filter by `Deployment.id`." + }, + "DeploymentFilterIsScheduleActive": { + "properties": { + "eq_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Eq ", + "description": "Only returns where deployment schedule is/is not active" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterIsScheduleActive", + "description": "Legacy filter to filter by `Deployment.is_schedule_active` which\nis always the opposite of `Deployment.paused`." + }, + "DeploymentFilterName": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of deployment names to include", + "examples": [ + [ + "my-deployment-1", + "my-deployment-2" + ] + ] + }, + "like_": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Like ", + "description": "A case-insensitive partial match. For example, passing 'marvin' will match 'marvin', 'sad-Marvin', and 'marvin-robot'.", + "examples": [ + "marvin" + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterName", + "description": "Filter by `Deployment.name`." + }, + "DeploymentFilterPaused": { + "properties": { + "eq_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Eq ", + "description": "Only returns where deployment is/is not paused" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterPaused", + "description": "Filter by `Deployment.paused`." 
+ }, + "DeploymentFilterTags": { + "properties": { + "operator": { + "allOf": [ + { + "$ref": "#/components/schemas/Operator" + } + ], + "description": "Operator for combining filter criteria. Defaults to 'and_'.", + "default": "and_" + }, + "all_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "All ", + "description": "A list of tags. Deployments will be returned only if their tags are a superset of the list", + "examples": [ + [ + "tag-1", + "tag-2" + ] + ] + }, + "is_null_": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Is Null ", + "description": "If true, only include deployments without tags" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterTags", + "description": "Filter by `Deployment.tags`." + }, + "DeploymentFilterWorkQueueName": { + "properties": { + "any_": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Any ", + "description": "A list of work queue names to include", + "examples": [ + [ + "work_queue_1", + "work_queue_2" + ] + ] + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFilterWorkQueueName", + "description": "Filter by `Deployment.work_queue_name`." + }, + "DeploymentFlowRunCreate": { + "properties": { + "state": { + "anyOf": [ + { + "$ref": "#/components/schemas/StateCreate" + }, + { + "type": "null" + } + ], + "description": "The state of the flow run to create" + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the flow run. Defaults to a random slug if not specified.", + "examples": [ + "my-flow-run" + ] + }, + "parameters": { + "type": "object", + "title": "Parameters" + }, + "context": { + "type": "object", + "title": "Context" + }, + "infrastructure_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Infrastructure Document Id" + }, + "empirical_policy": { + "allOf": [ + { + "$ref": "#/components/schemas/FlowRunPolicy" + } + ], + "description": "The empirical policy for the flow run." + }, + "tags": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Tags", + "description": "A list of tags for the flow run.", + "examples": [ + [ + "tag-1", + "tag-2" + ] + ] + }, + "idempotency_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Idempotency Key", + "description": "An optional idempotency key. If a flow run with the same idempotency key has already been created, the existing flow run will be returned." + }, + "parent_task_run_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Parent Task Run Id" + }, + "work_queue_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Queue Name" + }, + "job_variables": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Job Variables" + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentFlowRunCreate", + "description": "Data used by the Prefect REST API to create a flow run from a deployment." 
+ }, + "DeploymentResponse": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the deployment." + }, + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version", + "description": "An optional version for the deployment." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description", + "description": "A description for the deployment." + }, + "flow_id": { + "type": "string", + "format": "uuid", + "title": "Flow Id", + "description": "The flow id associated with the deployment." + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + }, + { + "type": "null" + } + ], + "title": "Schedule", + "description": "A schedule for the deployment." + }, + "is_schedule_active": { + "type": "boolean", + "title": "Is Schedule Active", + "description": "Whether or not the deployment schedule is active.", + "default": true + }, + "paused": { + "type": "boolean", + "title": "Paused", + "description": "Whether or not the deployment is paused.", + "default": false + }, + "schedules": { + "items": { + "$ref": "#/components/schemas/DeploymentSchedule" + }, + "type": "array", + "title": "Schedules", + "description": "A list of schedules for the deployment." + }, + "job_variables": { + "type": "object", + "title": "Job Variables", + "description": "Overrides to apply to the base infrastructure block at runtime." + }, + "parameters": { + "type": "object", + "title": "Parameters", + "description": "Parameters for flow runs scheduled by the deployment." + }, + "tags": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Tags", + "description": "A list of tags for the deployment", + "examples": [ + [ + "tag-1", + "tag-2" + ] + ] + }, + "work_queue_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Queue Name", + "description": "The work queue for the deployment. If no work queue is set, work will not be scheduled." + }, + "last_polled": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Last Polled", + "description": "The last time the deployment was polled for status updates." + }, + "parameter_openapi_schema": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Parameter Openapi Schema", + "description": "The parameter schema of the flow, including defaults." + }, + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Path", + "description": "The path to the working directory for the workflow, relative to remote storage or an absolute path." + }, + "pull_steps": { + "anyOf": [ + { + "items": { + "type": "object" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Pull Steps", + "description": "Pull steps for cloning and running this deployment." 
+ }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Entrypoint", + "description": "The path to the entrypoint for the workflow, relative to the `path`." + }, + "manifest_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Manifest Path", + "description": "The path to the flow's manifest file, relative to the chosen storage." + }, + "storage_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Storage Document Id", + "description": "The block document defining storage used for this flow." + }, + "infrastructure_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Infrastructure Document Id", + "description": "The block document defining infrastructure to use for flow runs." + }, + "created_by": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreatedBy" + }, + { + "type": "null" + } + ], + "description": "Optional information about the creator of this deployment." + }, + "updated_by": { + "anyOf": [ + { + "$ref": "#/components/schemas/UpdatedBy" + }, + { + "type": "null" + } + ], + "description": "Optional information about the updater of this deployment." + }, + "work_pool_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Pool Name", + "description": "The name of the deployment's work pool." + }, + "status": { + "anyOf": [ + { + "$ref": "#/components/schemas/DeploymentStatus" + }, + { + "type": "null" + } + ], + "description": "Whether the deployment is ready to run flows.", + "default": "NOT_READY" + }, + "enforce_parameter_schema": { + "type": "boolean", + "title": "Enforce Parameter Schema", + "description": "Whether or not the deployment should enforce the parameter schema.", + "default": true + } + }, + "type": "object", + "required": [ + "name", + "flow_id" + ], + "title": "DeploymentResponse" + }, + "DeploymentSchedule": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + }, + "created": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Created" + }, + "updated": { + "anyOf": [ + { + "type": "string", + "format": "date-time" + }, + { + "type": "null" + } + ], + "title": "Updated" + }, + "deployment_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Deployment Id", + "description": "The deployment id associated with this schedule." + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + } + ], + "title": "Schedule", + "description": "The schedule for the deployment." + }, + "active": { + "type": "boolean", + "title": "Active", + "description": "Whether or not the schedule is active.", + "default": true + }, + "max_active_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Active Runs", + "description": "The maximum number of active runs for the schedule." + }, + "max_scheduled_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Scheduled Runs", + "description": "The maximum number of scheduled runs for the schedule." 
+ }, + "catchup": { + "type": "boolean", + "title": "Catchup", + "description": "Whether or not a worker should catch up on Late runs for the schedule.", + "default": false + } + }, + "type": "object", + "required": [ + "schedule" + ], + "title": "DeploymentSchedule" + }, + "DeploymentScheduleCreate": { + "properties": { + "active": { + "type": "boolean", + "title": "Active", + "description": "Whether or not the schedule is active.", + "default": true + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + } + ], + "title": "Schedule", + "description": "The schedule for the deployment." + }, + "max_active_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Active Runs", + "description": "The maximum number of active runs for the schedule." + }, + "max_scheduled_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Scheduled Runs", + "description": "The maximum number of scheduled runs for the schedule." + }, + "catchup": { + "type": "boolean", + "title": "Catchup", + "description": "Whether or not a worker should catch up on Late runs for the schedule.", + "default": false + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "schedule" + ], + "title": "DeploymentScheduleCreate" + }, + "DeploymentScheduleUpdate": { + "properties": { + "active": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Active", + "description": "Whether or not the schedule is active." + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + }, + { + "type": "null" + } + ], + "title": "Schedule", + "description": "The schedule for the deployment." + }, + "max_active_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Active Runs", + "description": "The maximum number of active runs for the schedule." + }, + "max_scheduled_runs": { + "anyOf": [ + { + "type": "integer", + "exclusiveMinimum": 0.0 + }, + { + "type": "null" + } + ], + "title": "Max Scheduled Runs", + "description": "The maximum number of scheduled runs for the schedule." + }, + "catchup": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Catchup", + "description": "Whether or not a worker should catch up on Late runs for the schedule." + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentScheduleUpdate" + }, + "DeploymentSort": { + "type": "string", + "enum": [ + "CREATED_DESC", + "UPDATED_DESC", + "NAME_ASC", + "NAME_DESC" + ], + "title": "DeploymentSort", + "description": "Defines deployment sorting options." + }, + "DeploymentStatus": { + "type": "string", + "enum": [ + "READY", + "NOT_READY" + ], + "title": "DeploymentStatus", + "description": "Enumeration of deployment statuses." 
+ }, + "DeploymentUpdate": { + "properties": { + "version": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Version" + }, + "schedule": { + "anyOf": [ + { + "$ref": "#/components/schemas/IntervalSchedule" + }, + { + "$ref": "#/components/schemas/CronSchedule" + }, + { + "$ref": "#/components/schemas/RRuleSchedule" + }, + { + "type": "null" + } + ], + "title": "Schedule", + "description": "The schedule for the deployment." + }, + "description": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Description" + }, + "is_schedule_active": { + "type": "boolean", + "title": "Is Schedule Active", + "description": "Whether the schedule is active.", + "default": true + }, + "paused": { + "type": "boolean", + "title": "Paused", + "description": "Whether or not the deployment is paused.", + "default": false + }, + "schedules": { + "items": { + "$ref": "#/components/schemas/DeploymentScheduleCreate" + }, + "type": "array", + "title": "Schedules", + "description": "A list of schedules for the deployment." + }, + "parameters": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Parameters", + "description": "Parameters for flow runs scheduled by the deployment." + }, + "tags": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Tags", + "description": "A list of deployment tags.", + "examples": [ + [ + "tag-1", + "tag-2" + ] + ] + }, + "work_queue_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Queue Name" + }, + "work_pool_name": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Work Pool Name", + "description": "The name of the deployment's work pool.", + "examples": [ + "my-work-pool" + ] + }, + "path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Path" + }, + "job_variables": { + "anyOf": [ + { + "type": "object" + }, + { + "type": "null" + } + ], + "title": "Job Variables", + "description": "Overrides for the flow's infrastructure configuration." + }, + "entrypoint": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Entrypoint" + }, + "manifest_path": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Manifest Path" + }, + "storage_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Storage Document Id" + }, + "infrastructure_document_id": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Infrastructure Document Id" + }, + "enforce_parameter_schema": { + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "null" + } + ], + "title": "Enforce Parameter Schema", + "description": "Whether or not the deployment should enforce the parameter schema." + } + }, + "additionalProperties": false, + "type": "object", + "title": "DeploymentUpdate", + "description": "Data used by the Prefect REST API to update a deployment." 
+ }, + "DoNothing": { + "properties": { + "type": { + "type": "string", + "enum": [ + "do-nothing" + ], + "const": "do-nothing", + "title": "Type", + "default": "do-nothing" + } + }, + "type": "object", + "title": "DoNothing", + "description": "Do nothing when an Automation is triggered" + }, + "Edge": { + "properties": { + "id": { + "type": "string", + "format": "uuid", + "title": "Id" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "Edge" + }, + "Event": { + "properties": { + "occurred": { + "type": "string", + "format": "date-time", + "title": "Occurred", + "description": "When the event happened from the sender's perspective" + }, + "event": { + "type": "string", + "title": "Event", + "description": "The name of the event that happened" + }, + "resource": { + "allOf": [ + { + "$ref": "#/components/schemas/Resource" + } + ], + "description": "The primary Resource this event concerns" + }, + "related": { + "items": { + "$ref": "#/components/schemas/RelatedResource" + }, + "type": "array", + "title": "Related", + "description": "A list of additional Resources involved in this event" + }, + "payload": { + "type": "object", + "title": "Payload", + "description": "An open-ended set of data describing what happened" + }, + "id": { + "type": "string", + "format": "uuid", + "title": "Id", + "description": "The client-provided identifier of this event" + }, + "follows": { + "anyOf": [ + { + "type": "string", + "format": "uuid" + }, + { + "type": "null" + } + ], + "title": "Follows", + "description": "The ID of an event that is known to have occurred prior to this one. If set, this may be used to establish a more precise ordering of causally-related events when they occur close enough together in time that the system may receive them out-of-order." 
+ } + }, + "type": "object", + "required": [ + "occurred", + "event", + "resource", + "id" + ], + "title": "Event", + "description": "The client-side view of an event that has happened to a Resource" + }, + "EventAnyResourceFilter": { + "properties": { + "id": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id", + "description": "Only include events for resources with these IDs" + }, + "id_prefix": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id Prefix", + "description": "Only include events for resources with IDs starting with these prefixes" + }, + "labels": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResourceSpecification" + }, + { + "type": "null" + } + ], + "description": "Only include events for related resources with these labels" + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventAnyResourceFilter" + }, + "EventCount": { + "properties": { + "value": { + "type": "string", + "title": "Value", + "description": "The value to use for filtering" + }, + "label": { + "type": "string", + "title": "Label", + "description": "The value to display for this count" + }, + "count": { + "type": "integer", + "title": "Count", + "description": "The count of matching events" + }, + "start_time": { + "type": "string", + "format": "date-time", + "title": "Start Time", + "description": "The start time of this group of events" + }, + "end_time": { + "type": "string", + "format": "date-time", + "title": "End Time", + "description": "The end time of this group of events" + } + }, + "type": "object", + "required": [ + "value", + "label", + "count", + "start_time", + "end_time" + ], + "title": "EventCount", + "description": "The count of events with the given filter value" + }, + "EventFilter": { + "properties": { + "occurred": { + "allOf": [ + { + "$ref": "#/components/schemas/EventOccurredFilter" + } + ], + "description": "Filter criteria for when the events occurred" + }, + "event": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventNameFilter" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for the event name" + }, + "any_resource": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventAnyResourceFilter" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for any resource involved in the event" + }, + "resource": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventResourceFilter" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for the resource of the event" + }, + "related": { + "anyOf": [ + { + "$ref": "#/components/schemas/EventRelatedFilter" + }, + { + "type": "null" + } + ], + "description": "Filter criteria for the related resources of the event" + }, + "id": { + "allOf": [ + { + "$ref": "#/components/schemas/EventIDFilter" + } + ], + "description": "Filter criteria for the events' ID" + }, + "order": { + "allOf": [ + { + "$ref": "#/components/schemas/EventOrder" + } + ], + "description": "The order to return filtered events", + "default": "DESC" + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventFilter" + }, + "EventIDFilter": { + "properties": { + "id": { + "anyOf": [ + { + "items": { + "type": "string", + "format": "uuid" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id", + "description": "Only include events with one of these IDs" + } + }, + "additionalProperties": false, + "type": "object", + 
"title": "EventIDFilter" + }, + "EventNameFilter": { + "properties": { + "prefix": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Prefix", + "description": "Only include events matching one of these prefixes" + }, + "exclude_prefix": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Exclude Prefix", + "description": "Exclude events matching one of these prefixes" + }, + "name": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Name", + "description": "Only include events matching one of these names exactly" + }, + "exclude_name": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Exclude Name", + "description": "Exclude events matching one of these names exactly" + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventNameFilter" + }, + "EventOccurredFilter": { + "properties": { + "since": { + "type": "string", + "format": "date-time", + "title": "Since", + "description": "Only include events after this time (inclusive)" + }, + "until": { + "type": "string", + "format": "date-time", + "title": "Until", + "description": "Only include events prior to this time (inclusive)" + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventOccurredFilter" + }, + "EventOrder": { + "type": "string", + "enum": [ + "ASC", + "DESC" + ], + "title": "EventOrder" + }, + "EventPage": { + "properties": { + "events": { + "items": { + "$ref": "#/components/schemas/ReceivedEvent" + }, + "type": "array", + "title": "Events", + "description": "The Events matching the query" + }, + "total": { + "type": "integer", + "title": "Total", + "description": "The total number of matching Events" + }, + "next_page": { + "anyOf": [ + { + "type": "string", + "minLength": 1, + "format": "uri" + }, + { + "type": "null" + } + ], + "title": "Next Page", + "description": "The URL for the next page of results, if there are more" + } + }, + "type": "object", + "required": [ + "events", + "total", + "next_page" + ], + "title": "EventPage", + "description": "A single page of events returned from the API, with an optional link to the\nnext page of results" + }, + "EventRelatedFilter": { + "properties": { + "id": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id", + "description": "Only include events for related resources with these IDs" + }, + "role": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Role", + "description": "Only include events for related resources in these roles" + }, + "resources_in_roles": { + "anyOf": [ + { + "items": { + "prefixItems": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array", + "maxItems": 2, + "minItems": 2 + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Resources In Roles", + "description": "Only include events with specific related resources in specific roles" + }, + "labels": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResourceSpecification" + }, + { + "type": "null" + } + ], + "description": "Only include events for related resources with these labels" + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventRelatedFilter" + }, + "EventResourceFilter": { 
+ "properties": { + "id": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id", + "description": "Only include events for resources with these IDs" + }, + "id_prefix": { + "anyOf": [ + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "type": "null" + } + ], + "title": "Id Prefix", + "description": "Only include events for resources with IDs starting with these prefixes." + }, + "labels": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResourceSpecification" + }, + { + "type": "null" + } + ], + "description": "Only include events for resources with these labels" + }, + "distinct": { + "type": "boolean", + "title": "Distinct", + "description": "Only include events for distinct resources", + "default": false + } + }, + "additionalProperties": false, + "type": "object", + "title": "EventResourceFilter" + }, + "EventTrigger": { + "properties": { + "type": { + "type": "string", + "enum": [ + "event" + ], + "const": "event", + "title": "Type", + "default": "event" + }, + "id": { + "type": "string", + "format": "uuid", + "title": "Id", + "description": "The unique ID of this trigger" + }, + "match": { + "allOf": [ + { + "$ref": "#/components/schemas/ResourceSpecification" + } + ], + "description": "Labels for resources which this trigger will match." + }, + "match_related": { + "allOf": [ + { + "$ref": "#/components/schemas/ResourceSpecification" + } + ], + "description": "Labels for related resources which this trigger will match." + }, + "after": { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true, + "title": "After", + "description": "The event(s) which must first been seen to fire this trigger. If empty, then fire this trigger immediately. Events may include trailing wildcards, like `prefect.flow-run.*`" + }, + "expect": { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true, + "title": "Expect", + "description": "The event(s) this trigger is expecting to see. If empty, this trigger will match any event. Events may include trailing wildcards, like `prefect.flow-run.*`" + }, + "for_each": { + "items": { + "type": "string" + }, + "type": "array", + "uniqueItems": true, + "title": "For Each", + "description": "Evaluate the trigger separately for each distinct value of these labels on the resource. By default, labels refer to the primary resource of the triggering event. You may also refer to labels from related resources by specifying `related::