Use PySide6 #2: workflow file for this run
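
# Reusable workflow: invoked via workflow_call with the runner OS, the Python
# version, and the test suite to run (gui-tests, cli-tests, unit-tests, or
# performance-tests) as inputs.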

on:
  workflow_call:
    inputs:
      os:
        type: string
      python-version:
        type: string
      test-type:
        type: string

env:
  ERT_SHOW_BACKTRACE: 1
  ECL_SKIP_SIGNAL: 1
  UV_SYSTEM_PYTHON: 1
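
# ERT_SHOW_BACKTRACE and ECL_SKIP_SIGNAL are read by ert and resdata/libecl
# respectively (presumably to print full tracebacks and to skip installing
# custom signal handlers); UV_SYSTEM_PYTHON=1 lets `uv pip install` target the
# interpreter provided by setup-python instead of requiring a virtual environment.
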
jobs:
  tests-ert:
    name: Run ert tests
    runs-on: ${{ inputs.os }}
    timeout-minutes: 60
    steps:
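      # Check out the repository with full history (fetch-depth: 0), submodules,
      # and Git LFS objects, then install the Qt/PySide6 system libraries through
      # a local composite action. The ./.github/actions/install_dependencies_qt
      # path is assumed; the original reference had no resolvable action path and
      # ran before checkout, which a local action requires.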
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: true
          lfs: true
      - uses: ./.github/actions/install_dependencies_qt  # local action path assumed
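
      # Provision the requested Python version, install uv, and install ert from
      # source together with its dev and everest extras.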
      - uses: actions/setup-python@v5
        id: setup_python
        with:
          python-version: ${{ inputs.python-version }}
      - name: Install uv
        uses: astral-sh/setup-uv@v4
      - name: Install ert
        run: |
          uv pip install ".[dev, everest]"
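
      # Only one of the four test steps below runs per invocation, selected by the
      # test-type input. --mpl enables pytest-mpl image comparison for the GUI
      # tests; PNG screenshots left under /tmp by failing tests are uploaded as
      # artifacts (failure() only).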
      - name: GUI Test
        if: inputs.test-type == 'gui-tests'
        run: |
          pytest --cov=ert --cov=everest --cov=_ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -o junit_family=legacy -v --mpl --benchmark-disable tests/ert/ui_tests/gui --durations=25
      - name: Upload artifact images
        uses: actions/upload-artifact@v4
        if: ${{ failure() }}
        continue-on-error: true
        with:
          name: test-images-${{ github.run_number }}-${{ github.run_id }}
          path: /tmp/tmp*/**/*.png
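
      # CLI variant of the UI tests (tests/ert/ui_tests/cli); --dist loadgroup
      # keeps tests that share an xdist_group marker on the same worker.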
      - name: CLI Test
        if: inputs.test-type == 'cli-tests'
        run: |
          pytest --cov=ert --cov=everest --cov=_ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -o junit_family=legacy -v --benchmark-disable --dist loadgroup tests/ert/ui_tests/cli --durations=25
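
      # Unit tests run in parallel with one pytest-xdist worker per logical core
      # (-n logical); a second pytest invocation collects doctests from src/ into a
      # separate coverage report (cov2.xml).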
      - name: Unit Test
        if: inputs.test-type == 'unit-tests'
        run: |
          pytest --cov=ert --cov=everest --cov=_ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -o junit_family=legacy -n logical --show-capture=stderr -v --benchmark-disable --mpl --dist loadgroup tests/ert/unit_tests --durations=25
          pytest --doctest-modules --cov=ert --cov=everest --cov=_ert --cov-report=xml:cov2.xml src/ --ignore src/ert/dark_storage
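
      # Performance tests reuse the same pytest options but target
      # tests/ert/performance_tests; benchmarking stays disabled here as well.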
      - name: Performance Test
        if: inputs.test-type == 'performance-tests'
        run: |
          pytest --cov=ert --cov=everest --cov=_ert --cov-report=xml:cov1.xml --junit-xml=junit.xml -o junit_family=legacy -n logical --show-capture=stderr -v --benchmark-disable --dist loadgroup tests/ert/performance_tests --durations=25
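
      # Coverage upload with a manual retry: the first attempt never fails the job
      # (continue-on-error), and if its outcome is 'failure' the workflow sleeps 30
      # seconds and uploads again, failing CI only on the main branch.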
      - name: Upload coverage to Codecov
        id: codecov1
        uses: codecov/codecov-action@v4
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true
          files: cov1.xml,cov2.xml
          flags: ${{ inputs.test-type }}
      - name: Codecov retry sleep
        if: steps.codecov1.outcome == 'failure'
        run: |
          sleep 30
      - name: Codecov retry
        uses: codecov/codecov-action@v4
        if: steps.codecov1.outcome == 'failure'
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: cov1.xml,cov2.xml
          flags: ${{ inputs.test-type }}
          fail_ci_if_error: ${{ github.ref == 'refs/heads/main' }}
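
      # Publish a summary of junit.xml even when earlier steps failed (unless the
      # run was cancelled), and send the same test results to Codecov.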
      - uses: test-summary/action@v2
        if: ${{ !cancelled() }}
        continue-on-error: true
        with:
          paths: junit.xml
      - name: Upload test results to Codecov
        if: ${{ !cancelled() }}
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
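
      # Final sanity checks: the installed `ert` entry point must respond to
      # --help, and the working tree must be clean once the generated coverage and
      # junit files are removed, i.e. the test run may not modify tracked files or
      # leave stray output behind.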
      - name: Test CLI
        run: |
          ert --help
      - name: Test for a clean repository
        run: |
          # Remove things we have generated on purpose:
          rm -rf .coverage
          rm -f coverage.xml cov1.xml cov2.xml junit.xml
          rm -f ert.*.whl
          git status --porcelain
          test -z "$(git status --porcelain)"