Skip to content

Commit

Permalink
split up c2l; skip test for dstg due to stochasticity
Browse files Browse the repository at this point in the history
  • Loading branch information
csangara committed Oct 3, 2024
1 parent bbefd9c commit 06cb10c
Show file tree
Hide file tree
Showing 3 changed files with 87 additions and 15 deletions.
69 changes: 69 additions & 0 deletions .github/workflows/test_run_dataset_cell2location.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# CI workflow: run the pipeline's run_dataset mode for cell2location only.
# Split out of the Python-tools workflow (see test_run_dataset_python.yml)
# so the slow cell2location run does not gate the other methods.
name: test run_dataset mode for cell2location
# This workflow runs the pipeline with the minimal test dataset to check
# that it completes without any syntax errors
on: [pull_request, workflow_dispatch]

jobs:
  run_dataset:
    name: Run workflow with run_dataset mode
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Nextflow versions: check pipeline minimum and latest edge version.
        # Each entry is a full VAR=value pair consumed by `export` below.
        nxf_ver:
          - 'NXF_VER=21.04.3'
    steps:
      - name: Check out pipeline code
        uses: actions/checkout@v3
        # with:
        #   lfs: true

      - name: Download test dataset
        run: |
          # URL is quoted so the shell does not glob-expand the '?'
          wget "https://zenodo.org/record/5763377/files/test_data.tar.gz?download=1" -O test_data.tar.gz
          tar -xzvf test_data.tar.gz
          echo $(ls)
      # - name: Checkout LFS objects
      #   run: git lfs checkout

      - name: Install Nextflow
        run: |
          # Explicit https scheme; get.nextflow.io serves the install script.
          wget -qO- https://get.nextflow.io | bash
          sudo mv nextflow /usr/local/bin/
          # Pin the Nextflow version from the matrix (exports NXF_VER=...).
          export ${{ matrix.nxf_ver }}
          nextflow self-update

      - name: Run pipeline with test data
        run: |
          nextflow run main.nf -profile test,docker --mode run_dataset \
            --methods cell2location

      - name: Upload proportions and metrics
        uses: actions/upload-artifact@v3
        with:
          name: deconv-proportions-and-metrics
          path: |
            deconv_proportions/*
            results/*
            trace.txt

  # Downstream job: validates the artifacts produced by run_dataset above.
  test_output_run_dataset:
    name: Tests outputs of the pipeline
    needs: run_dataset
    runs-on: ubuntu-latest
    # testthat is available in the rocker/tidyverse image.
    container: rocker/tidyverse:3.6.3
    steps:
      - name: Check out pipeline code
        uses: actions/checkout@v3

      - name: Download proportions from test run
        uses: actions/download-artifact@v3
        with:
          name: deconv-proportions-and-metrics

      - name: Check proportions and metrics with testthat
        shell: bash
        run: |
          echo $(ls)
          Rscript unit-test/test_run_dataset.R \
            cell2location
4 changes: 2 additions & 2 deletions .github/workflows/test_run_dataset_python.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ jobs:
- name: Run pipeline with test data
run: |
nextflow run main.nf -profile test,docker --mode run_dataset \
--methods cell2location,stereoscope,destvi,tangram,stride
--methods stereoscope,destvi,tangram,stride
- name: Upload proportions and metrics
uses: actions/upload-artifact@v3
Expand Down Expand Up @@ -66,4 +66,4 @@ jobs:
run: |
echo $(ls)
Rscript unit-test/test_run_dataset.R \
cell2location,stereoscope,destvi,tangram,stride
stereoscope,destvi,tangram,stride
29 changes: 16 additions & 13 deletions unit-test/test_run_dataset.R
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,16 @@ for (method in methods){
expect_equal(colnames(output_props), celltypenames)

# Check 2
cat(">>> Checking whether proportions are correct...\n")
expected_props <- read.table(paste0("unit-test/test_run_dataset/",
"proportions_", method, "_test_sp_data"),
sep="\t", header=TRUE)
expect_equal(rowSums(output_props), rep(1, 16), tolerance=1e-6)
expect_equal(sum(output_props$L23IT), sum(expected_props$L23IT), tolerance=1e-3)
expect_equal(output_props[15,1:10], expected_props[15,1:10], tolerance=1e-3)
if (method != "dstg"){
cat(">>> Checking whether proportions are correct...\n")
expected_props <- read.table(paste0("unit-test/test_run_dataset/",
"proportions_", method, "_test_sp_data"),
sep="\t", header=TRUE)
expect_equal(rowSums(output_props), rep(1, 16), tolerance=1e-6)
expect_equal(sum(output_props$L23IT), sum(expected_props$L23IT), tolerance=1e-3)
expect_equal(output_props[15,1:10], expected_props[15,1:10], tolerance=1e-3)
}


# Check 3
cat(">>> Checking whether metrics file is formatted correctly...\n")
output_metrics <- read.table(paste0("results/test_sp_data/metrics_", method, "_test_sp_data"),
Expand All @@ -47,12 +48,14 @@ for (method in methods){
expect_equal(colnames(output_metrics), metric_names)

# Check 4
cat(">>> Checking whether metrics are correct...\n")
expected_metrics <- read.table(paste0("unit-test/test_run_dataset/",
"metrics_", method, "_test_sp_data"),
sep=" ", header=TRUE)
if (method != "dstg"){
cat(">>> Checking whether metrics are correct...\n")
expected_metrics <- read.table(paste0("unit-test/test_run_dataset/",
"metrics_", method, "_test_sp_data"),
sep=" ", header=TRUE)

expect_equal(output_metrics, expected_metrics, tolerance=1e-3)
expect_equal(output_metrics, expected_metrics, tolerance=1e-3)
}

}

Expand Down

0 comments on commit 06cb10c

Please sign in to comment.