# This code is autogenerated.
# Code is generated by running custom script: python3 readme.py
# Any manual changes to this file may cause incorrect behavior.
# Any manual changes will be overwritten if the code is regenerated.
name: cli-jobs-spark-serverless-spark-pipeline-default-identity
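# Triggers: manual dispatch, a twice-daily schedule (10:33 and 22:33 UTC),
# and pull requests to main that touch the Spark samples, the bootstrapping
# scripts, this workflow, or the CLI setup script.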
on:
  workflow_dispatch:
  schedule:
    - cron: "33 10/12 * * *"
  pull_request:
    branches:
      - main
    paths:
      - cli/jobs/spark/**
      - infra/bootstrapping/**
      - .github/workflows/cli-jobs-spark-serverless-spark-pipeline-default-identity.yml
      - cli/jobs/spark/data/titanic.csv
      - cli/setup.sh
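# One run per workflow and PR number (or ref) at a time; a newly queued run
# cancels any run still in progress for the same group.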
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - name: check out repo
      uses: actions/checkout@v2
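    # Log in to Azure; AZUREML_CREDENTIALS is expected to hold service
    # principal credentials in the JSON format that azure/login accepts.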
    - name: azure login
      uses: azure/login@v1
      with:
        creds: ${{secrets.AZUREML_CREDENTIALS}}
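    # The echo prints the concurrency group to the log for debugging;
    # bootstrap.sh provisions (or verifies) the shared Azure resources the
    # samples run against.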
    - name: bootstrap resources
      run: |
        echo '${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}';
        bash bootstrap.sh
      working-directory: infra/bootstrapping
      continue-on-error: false
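    # Prepare the Azure ML CLI (setup.sh is expected to install the `az ml`
    # extension); continue-on-error lets the run proceed even if this step
    # exits nonzero.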
    - name: setup-cli
      run: |
        source "${{ github.workspace }}/infra/bootstrapping/sdk_helpers.sh";
        source "${{ github.workspace }}/infra/bootstrapping/init_environment.sh";
        bash setup.sh
      working-directory: cli
      continue-on-error: true
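    # Upload the sample data under jobs/spark/ (including data/titanic.csv)
    # to blob storage; `bash -x` traces each command in the log.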
    - name: upload data
      run: |
        bash -x upload-data-to-blob.sh jobs/spark/
      working-directory: cli
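    # Submit the serverless Spark pipeline defined in
    # serverless-spark-pipeline-default-identity.yml via the shared
    # run-job.sh helper, which is expected to wait on the job's completion.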
    - name: run job
      run: |
        source "${{ github.workspace }}/infra/bootstrapping/sdk_helpers.sh";
        source "${{ github.workspace }}/infra/bootstrapping/init_environment.sh";
        bash -x ../../run-job.sh serverless-spark-pipeline-default-identity.yml
      working-directory: cli/jobs/spark