diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index d7360162..deb92b74 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -42,4 +42,4 @@ If there are any failures then the automated tests fail. These tests are run both with the latest available version of Nextflow and also the minimum required version that is stated in the pipeline code. ## Getting help -For further information/help, please consult the [nf-core/nascent documentation](https://github.com/nf-core/nascent#documentation) and don't hesitate to get in touch on [Gitter](https://gitter.im/nf-core/Lobby) +For further information/help, please consult the [nf-core/nascent documentation](https://github.com/nf-core/nascent#documentation) and don't hesitate to get in touch on the pipeline channel on [Slack](https://nf-core-invite.herokuapp.com/). diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml new file mode 100644 index 00000000..e052a635 --- /dev/null +++ b/.github/markdownlint.yml @@ -0,0 +1,9 @@ +# Markdownlint configuration file +default: true +line-length: false +no-multiple-blanks: 0 +blanks-around-headers: false +blanks-around-lists: false +header-increment: false +no-duplicate-header: + siblings_only: true diff --git a/.gitignore b/.gitignore index 46f69e41..5b54e3e6 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ data/ results/ .DS_Store tests/test_data +*.pyc diff --git a/.travis.yml b/.travis.yml index 117d4a56..b88805da 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,9 +11,10 @@ before_install: # PRs to master are only ok if coming from dev branch - '[ $TRAVIS_PULL_REQUEST = "false" ] || [ $TRAVIS_BRANCH != "master" ] || ([ $TRAVIS_PULL_REQUEST_SLUG = $TRAVIS_REPO_SLUG ] && [ $TRAVIS_PULL_REQUEST_BRANCH = "dev" ])' # Pull the docker image first so the test doesn't wait for this - - docker pull ignaciot/nascent + - docker pull nfcore/nascent:dev # Fake the tag locally so that the pipeline runs properly - - docker tag ignaciot/nascent ignaciot/nascent:latest + # Looks weird when this is :dev to :dev, but makes sense when testing code for a release (:dev to :1.0.1) + - docker tag nfcore/nascent:dev nfcore/nascent:1.0 install: # Install Nextflow - wget -qO- get.nextflow.io | bash - sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow # Install nf-core/tools - - pip install --upgrade pip + - pip install --upgrade pip - pip install nf-core # Reset - mkdir ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests + # Install markdownlint-cli + - sudo apt-get install npm && npm install -g markdownlint-cli env: - NXF_VER='0.32.0' # Specify a minimum NF version that should be tested and work @@ -33,5 +36,7 @@ script: # Lint the pipeline code - nf-core lint ${TRAVIS_BUILD_DIR} + # Lint the documentation + - markdownlint ${TRAVIS_BUILD_DIR} -c ${TRAVIS_BUILD_DIR}/.github/markdownlint.yml # Run the pipeline with the test profile - nextflow run ${TRAVIS_BUILD_DIR} -profile test,docker diff --git a/CHANGELOG.md b/CHANGELOG.md index a3540a15..c025c0d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ # nf-core/nascent: Changelog -## v1.0dev - +## v1.0 - 2019-04-16 Initial release of nf-core/nascent, created with the [nf-core](http://nf-co.re/) template.
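The new CI lint step can be reproduced locally before pushing. A minimal sketch, assuming Node.js/npm is available and the commands are run from the repository root (mirroring the `.travis.yml` steps above):

```bash
# Install markdownlint-cli, then lint the docs with the same config the CI job uses
npm install -g markdownlint-cli
markdownlint . -c .github/markdownlint.yml
```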
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 21096193..09226d0d 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -34,7 +34,7 @@ This Code of Conduct applies both within project spaces and in public spaces whe ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on the [Gitter channel](https://gitter.im/nf-core/Lobby). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-core-invite.herokuapp.com/). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. diff --git a/README.md b/README.md index 5ae239a1..580df378 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,9 @@ # nf-core/nascent -**Nascent Transcription Processing Pipeline** -[![Build Status](https://travis-ci.org/nf-core/nascent.svg?branch=master)](https://travis-ci.org/nf-core/nascent) -[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.32.0-brightgreen.svg)](https://www.nextflow.io/) +**Nascent Transcription Processing Pipeline**. +[![Build Status](https://travis-ci.com/nf-core/nascent.svg?branch=master)](https://travis-ci.com/nf-core/nascent) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.32.0-brightgreen.svg)](https://www.nextflow.io/) [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](http://bioconda.github.io/) [![Docker](https://img.shields.io/docker/automated/nfcore/nascent.svg)](https://hub.docker.com/r/nfcore/nascent) ![Singularity Container available]( @@ -13,18 +13,70 @@ https://img.shields.io/badge/singularity-available-7E4C74.svg) The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible. +#### Reference + +If you've used this pipeline in your research, you can cite this pipeline using DOI 10.17605/OSF.IO/SV4UB ([OSF project](https://osf.io/sv4ub/)). + ### Documentation The nf-core/nascent pipeline comes with documentation about the pipeline, found in the `docs/` directory: - -1. [Installation](docs/installation.md) +1. [Installation](https://nf-co.re/usage/installation) 2. Pipeline configuration - * [Local installation](docs/configuration/local.md) - * [Adding your own system](docs/configuration/adding_your_own.md) + * [Local installation](https://nf-co.re/usage/local_installation) + * [Adding your own system config](https://nf-co.re/usage/adding_own_config) + * [Reference genomes](https://nf-co.re/usage/reference_genomes) 3. [Running the pipeline](docs/usage.md) 4. 
[Output and how to interpret the results](docs/output.md) -5. [Troubleshooting](docs/troubleshooting.md) +5. [Troubleshooting](https://nf-co.re/usage/troubleshooting) + +This pipeline is designed to process the sequencing output of nascent transcription assays, like GRO-seq or PRO-seq. It produces bedGraph- and bigWig-formatted outputs after mapping strand-specific reads, as well as other useful outputs like quality control reports or IGV-ready (Integrative Genomics Viewer) tdf files. + +### Quick start + +Edit the appropriate config file, e.g. `conf/slurm_grch38.config`, to ensure the proper paths are set for genome reference files and other executables (look for all mentions of `COMPLETE_*`). Variable names should hopefully be self-explanatory. You can specify the Nextflow working directory and output directory with flags. Note that you must also specify the email address to which the run report will be sent. + + nextflow run nf-core/nascent --reads '*_R{1,2}.fastq.gz' -profile standard,docker + +## Arguments + +### Required Arguments | Argument | Usage | Description | |-----------|----------------------------------|----------------------------------------------------------------------| | -profile | \ | Configuration profile to use. | | --fastqs | \ | Directory pattern for fastq files. | | --sras | \ | Directory pattern for sra files. | | --genome_id | \<'hg38'> | Genome ID to which the samples will be mapped (e.g. hg38, mm10, rn6).| | --workdir | \ | Nextflow working directory where all intermediate files are saved. | | --email | \ | Where to send workflow report email. | ### Save Options | Arguments | Usage | Description | |------------|---------------|-----------------------------------------------------------| | --outdir | \ | Specifies where to save the output from the nextflow run. | | --savefq | | Compresses and saves raw fastq reads. | | --saveTrim | | Compresses and saves trimmed fastq reads. | | --saveAll | | Compresses and saves all fastq reads. | | --skipBAM | | Skips saving BAM files (only save CRAM). Default=False | ### Input File Options | Arguments | Usage | Description | |--------------|-------------|------------------------------------------------------------------------------| | --singleEnd | | Specifies that the input files are not paired reads (default is paired-end). | | --flip | | Reverse complements each strand. Necessary for some library preps. | ### Performance Options | Arguments | Usage | Description | |-----------------|-------------|---------------------------------------------------------| | --threadfqdump | | Runs multi-threading for fastq-dump for sra processing. | ### QC Options | Arguments | Usage | Description | |-----------------|-------------|---------------------------------------------------------| | --skipMultiQC | | Skip running MultiQC. | | --skipRSeQC | | Skip running RSeQC. | - +## Credits nf-core/nascent was originally written by Ignacio Tripodi ([@ignaciot](https://github.com/ignaciot)) and Margaret Gruca ([@magruca](https://github.com/magruca)). -### Credits -nf-core/nascent was originally written by Ignacio Tripodi, Margaret Gruca. +Many thanks to the nf-core team and all who provided invaluable feedback and assistance along the way, particularly to [@apeltzer](https://github.com/apeltzer), [@ewels](https://github.com/ewels), [@drpatelh](https://github.com/drpatelh), and [@pditommaso](https://github.com/pditommaso).
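For instance, an SRA-based run combining the required arguments documented above might look like the following; every path, the genome ID, and the e-mail address are illustrative placeholders:

```bash
# Map SRA-derived reads against hg38 and collect results under /project/results/
nextflow run nf-core/nascent \
    --sras '/project/sras/*.sra' \
    --genome_id 'hg38' \
    --workdir '/project/tmp/' \
    --outdir '/project/results/' \
    --email 'you@example.com' \
    -profile docker
```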
diff --git a/Singularity b/Singularity deleted file mode 100644 index 086e81e1..00000000 --- a/Singularity +++ /dev/null @@ -1,18 +0,0 @@ -From:nfcore/base -Bootstrap:docker - -%labels - MAINTAINER Ignacio Tripodi, Margaret Gruca - DESCRIPTION Singularity image containing all requirements for the nf-core/nascent pipeline - VERSION 1.0 - -%environment - PATH=/opt/conda/envs/nf-core-nascent-1.0/bin:$PATH - export PATH - -%files - environment.yml / - -%post - /opt/conda/bin/conda env create -f /environment.yml - /opt/conda/bin/conda clean -a diff --git a/assets/email_template.txt b/assets/email_template.txt index a948c451..de3d6153 100644 --- a/assets/email_template.txt +++ b/assets/email_template.txt @@ -17,23 +17,6 @@ ${errorReport} } %> -<% if (!success){ - out << """#################################################### -## nf-core/nascent execution completed unsuccessfully! ## -#################################################### -The exit status of the task that caused the workflow execution to fail was: $exitStatus. -The full error message was: - -${errorReport} -""" -} else { - out << "## nf-core/nascent execution completed successfully! ##" } -%> - - - - The workflow was completed at $dateComplete (duration: $duration) The command used to launch the workflow was as follows: diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml new file mode 100644 index 00000000..5ec55b92 --- /dev/null +++ b/assets/multiqc_config.yaml @@ -0,0 +1,9 @@ +report_comment: > + This report has been generated by the nf-core/nascent + analysis pipeline. For information about how to interpret these results, please see the + documentation. +report_section_order: + nf-core/nascent-software-versions: + order: -1000 + +export_plots: true diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt index fd1cd739..2d671220 100644 --- a/assets/sendmail_template.txt +++ b/assets/sendmail_template.txt @@ -1,11 +1,36 @@ To: $email Subject: $subject Mime-Version: 1.0 -Content-Type: multipart/related;boundary="nfmimeboundary" +Content-Type: multipart/related;boundary="nfcoremimeboundary" ---nfmimeboundary +--nfcoremimeboundary Content-Type: text/html; charset=utf-8 $email_html ---nfmimeboundary-- +<% +if (mqcFile){ +def mqcFileObj = new File("$mqcFile") +if (mqcFileObj.length() < mqcMaxSize){ +out << """ +--nfcoremimeboundary +Content-Type: text/html; name=\"multiqc_report\" +Content-Transfer-Encoding: base64 +Content-ID: <mqcreport> +Content-Disposition: attachment; filename=\"${mqcFileObj.getName()}\" + +${mqcFileObj. + bytes. + encodeBase64(). + toString(). + tokenize( '\n' )*. + toList()*. + collate( 76 )*. + collect { it.join() }. + flatten().
+ join( '\n' )} """ }} %> + +--nfcoremimeboundary-- diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py index b46a5558..54e6c2e8 100755 --- a/bin/scrape_software_versions.py +++ b/bin/scrape_software_versions.py @@ -3,7 +3,6 @@ from collections import OrderedDict import re -# TODO nf-core: Add additional regexes for new tools in process get_software_versions regexes = { 'nf-core/nascent': ['v_pipeline.txt', r"(\S+)"], 'Nextflow': ['v_nextflow.txt', r"(\S+)"], @@ -24,9 +23,14 @@ if match: results[k] = "v{}".format(match.group(1)) +# Remove software set to false in results (iterate over a copy so the dict can be mutated safely) +for k in list(results): + if not results[k]: + del(results[k]) + # Dump to YAML print (''' -id: 'nf-core/nascent-software-versions' +id: 'software_versions' section_name: 'nf-core/nascent Software Versions' section_href: 'https://github.com/nf-core/nascent' plot_type: 'html' @@ -35,5 +39,10 @@
''') for k,v in results.items(): -    print("    <dt>{}</dt><dd>{}</dd>".format(k,v)) +    print("    <dt>{}</dt><dd><samp>{}</samp></dd>".format(k,v)) print ("    </dl>") + +# Write out regexes as csv file: +with open('software_versions.csv', 'w') as f: + for k,v in results.items(): + f.write("{}\t{}\n".format(k,v)) diff --git a/conf/awsbatch.config b/conf/awsbatch.config index 79078c7b..14af5866 100644 --- a/conf/awsbatch.config +++ b/conf/awsbatch.config @@ -1,10 +1,15 @@ /* * ------------------------------------------------- - * Nextflow config file for AWS Batch + * Nextflow config file for running on AWS batch * ------------------------------------------------- - * Imported under the 'awsbatch' Nextflow profile in nextflow.config - * Uses docker for software depedencies automagically, so not specified here. + * Base config needed for running with -profile awsbatch */ +params { + config_profile_name = 'AWSBATCH' + config_profile_description = 'AWSBATCH Cloud Profile' + config_profile_contact = 'Alexander Peltzer (@apeltzer)' + config_profile_url = 'https://aws.amazon.com/de/batch/' +} aws.region = params.awsregion process.executor = 'awsbatch' diff --git a/conf/base.config b/conf/base.config index ee4dfbe8..9a5ff9da 100644 --- a/conf/base.config +++ b/conf/base.config @@ -11,14 +11,11 @@ process { - container = params.container - - // TODO nf-core: Check the defaults for all processes cpus = { check_max( 2 * task.attempt, 'cpus' ) } memory = { check_max( 8.GB * task.attempt, 'memory' ) } time = { check_max( 2.h * task.attempt, 'time' ) } - errorStrategy = { task.exitStatus in [143,137] ? 'retry' : 'finish' } + errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' } maxRetries = 1 maxErrors = '-1' diff --git a/conf/binac.config b/conf/binac.config deleted file mode 100644 index be69d9e8..00000000 --- a/conf/binac.config +++ /dev/null @@ -1,22 +0,0 @@ -/* - * ---------------------------------------------------------------------------- - * Nextflow config file for use with Singularity on BINAC cluster in Tuebingen - * ---------------------------------------------------------------------------- - * Defines basic usage limits and singularity image id. - */ - -singularity { - enabled = true -} - -process { - beforeScript = 'module load devel/singularity/3.0.1' - executor = 'pbs' - queue = 'short' -} - -params { - max_memory = 128.GB - max_cpus = 28 - max_time = 48.h -} diff --git a/conf/cfc.config b/conf/cfc.config deleted file mode 100644 index 6285925b..00000000 --- a/conf/cfc.config +++ /dev/null @@ -1,21 +0,0 @@ -/* - * ------------------------------------------------------------- - * Nextflow config file for use with Singularity on CFC at QBIC - * ------------------------------------------------------------- - * Defines basic usage limits and singularity image id.
- */ - -singularity { - enabled = true -} - -process { - beforeScript = 'module load qbic/singularity_slurm/3.0.1' - executor = 'slurm' -} - -params { - max_memory = 60.GB - max_cpus = 24 - max_time = 140.h -} diff --git a/conf/igenomes.config b/conf/igenomes.config index d19e61f4..08154994 100644 --- a/conf/igenomes.config +++ b/conf/igenomes.config @@ -9,7 +9,6 @@ params { // illumina iGenomes reference file paths - // TODO nf-core: Add new reference types and strip out those that are not needed genomes { 'GRCh37' { bed12 = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed" diff --git a/conf/slurm.config b/conf/slurm.config deleted file mode 100644 index 3c0c99e2..00000000 --- a/conf/slurm.config +++ /dev/null @@ -1,21 +0,0 @@ -/* - * ------------------------------------------------- - * Nextflow config file for running tests - * ------------------------------------------------- - * Defines bundled input files and everything required - * to run a fast and simple test. Use as follows: - * nextflow run nf-core/methylseq -profile test - */ - -/* Will run data as paired-end by default. A minimum usage example is as follows: - * nextflow run main.nf -profile fiji - * Enter nextflow run main.nf -profile fiji --help for more aguments - */ - -params { - // Genome Reference File Pathds - fasta = "COMPLETE_PATH_TO_DIRECTORY_CONTAINING_GENOME_FASTA" - hisat2_indices = "COMPLETE_PATH_TO_DIRECTORY_CONTAINING_HISAT2_INDICES" - genome_refseq = "COMPLETE_PATH_TO_REFSEQ_BEDFILE_FOR_THIS_REFERENCE_GENOME" - -} diff --git a/conf/test.config b/conf/test.config index 88510c5e..60b09634 100644 --- a/conf/test.config +++ b/conf/test.config @@ -16,6 +16,8 @@ params { // Input data singleEnd = true threadfqdump = false - fastqs = "https://raw.githubusercontent.com/nf-core/test-datasets/nascent/testdata/SRR4012402.chr21.fastq" - fasta = "https://raw.githubusercontent.com/nf-core/test-datasets/nascent/reference/chr21.fa" + readPaths = [ + ['SRR4012402', ['https://raw.githubusercontent.com/nf-core/test-datasets/nascent/testdata/SRR4012402.fastq']], +] + fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/nascent/reference/chr21.fa' } diff --git a/conf/uzh.config b/conf/uzh.config deleted file mode 100644 index 68cd7dd5..00000000 --- a/conf/uzh.config +++ /dev/null @@ -1,19 +0,0 @@ -/* - * -------------------------------------------------------------------------------- - * Nextflow config file for use with Singularity on University of Zurich Cluster - * -------------------------------------------------------------------------------- - */ - -singularity { - enabled = true -} - -process { - executor = 'slurm' -} - -params { - max_memory = 1800.GB - max_cpus = 112 - max_time = 168.h -} diff --git a/docs/README.md b/docs/README.md index 22e81c99..1bb7e42d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,10 +2,11 @@ The nf-core/nascent documentation is split into the following files: -1. [Installation](installation.md) -2. [Running the pipeline](usage.md) -3. Pipeline configuration - * [Adding your own system](configuration/adding_your_own.md) - * [Reference genomes](configuration/reference_genomes.md) +1. [Installation](https://nf-co.re/usage/installation) +2. Pipeline configuration + * [Local installation](https://nf-co.re/usage/local_installation) + * [Adding your own system config](https://nf-co.re/usage/adding_own_config) + * [Reference genomes](https://nf-co.re/usage/reference_genomes) +3. [Running the pipeline](usage.md) 4. 
[Output and how to interpret the results](output.md) -5. [Troubleshooting](troubleshooting.md) +5. [Troubleshooting](https://nf-co.re/usage/troubleshooting) diff --git a/docs/configuration/adding_your_own.md b/docs/configuration/adding_your_own.md deleted file mode 100644 index 29a1fde3..00000000 --- a/docs/configuration/adding_your_own.md +++ /dev/null @@ -1,86 +0,0 @@ -# nf-core/nascent: Configuration for other clusters - -It is entirely possible to run this pipeline on other clusters, though you will need to set up your own config file so that the pipeline knows how to work with your cluster. - -> If you think that there are other people using the pipeline who would benefit from your configuration (eg. other common cluster setups), please let us know. We can add a new configuration and profile which can used by specifying `-profile ` when running the pipeline. - -If you are the only person to be running this pipeline, you can create your config file as `~/.nextflow/config` and it will be applied every time you run Nextflow. Alternatively, save the file anywhere and reference it when running the pipeline with `-c path/to/config` (see the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more). - -A basic configuration comes with the pipeline, which runs by default (the `standard` config profile - see [`conf/base.config`](../conf/base.config)). This means that you only need to configure the specifics for your system and overwrite any defaults that you want to change. - -## Cluster Environment -By default, pipeline uses the `local` Nextflow executor - in other words, all jobs are run in the login session. If you're using a simple server, this may be fine. If you're using a compute cluster, this is bad as all jobs will run on the head node. - -To specify your cluster environment, add the following line to your config file: - -```nextflow -process.executor = 'YOUR_SYSTEM_TYPE' -``` - -Many different cluster types are supported by Nextflow. For more information, please see the [Nextflow documentation](https://www.nextflow.io/docs/latest/executor.html). - -Note that you may need to specify cluster options, such as a project or queue. To do so, use the `clusterOptions` config option: - -```nextflow -process { - executor = 'SLURM' - clusterOptions = '-A myproject' -} -``` - - -## Software Requirements -To run the pipeline, several software packages are required. How you satisfy these requirements is essentially up to you and depends on your system. If possible, we _highly_ recommend using either Docker or Singularity. - -Please see the [`installation documentation`](../installation.md) for how to run using the below as a one-off. These instructions are about configuring a config file for repeated use. - -### Docker -Docker is a great way to run nf-core/nascent, as it manages all software installations and allows the pipeline to be run in an identical software environment across a range of systems. - -Nextflow has [excellent integration](https://www.nextflow.io/docs/latest/docker.html) with Docker, and beyond installing the two tools, not much else is required - nextflow will automatically fetch the [nfcore/nascent](https://hub.docker.com/r/nfcore/nascent/) image that we have created and is hosted at dockerhub at run time. 
- -To add docker support to your own config file, add the following: - -```nextflow -docker.enabled = true -process.container = "nfcore/nascent" -``` - -Note that the dockerhub organisation name annoyingly can't have a hyphen, so is `nfcore` and not `nf-core`. - - -### Singularity image -Many HPC environments are not able to run Docker due to security issues. -[Singularity](http://singularity.lbl.gov/) is a tool designed to run on such HPC systems which is very similar to Docker. - -To specify singularity usage in your pipeline config file, add the following: - -```nextflow -singularity.enabled = true -process.container = "shub://nf-core/nascent" -``` - -If you intend to run the pipeline offline, nextflow will not be able to automatically download the singularity image for you. -Instead, you'll have to do this yourself manually first, transfer the image file and then point to that. - -First, pull the image file where you have an internet connection: - -```bash -singularity pull --name nf-core-nascent.simg shub://nf-core/nascent -``` - -Then transfer this file and point the config file to the image: - -```nextflow -singularity.enabled = true -process.container = "/path/to/nf-core-nascent.simg" -``` - - -### Conda -If you're not able to use Docker or Singularity, you can instead use conda to manage the software requirements. -To use conda in your own config file, add the following: - -```nextflow -process.conda = "$baseDir/environment.yml" -``` diff --git a/docs/configuration/reference_genomes.md b/docs/configuration/reference_genomes.md deleted file mode 100644 index 5af328fc..00000000 --- a/docs/configuration/reference_genomes.md +++ /dev/null @@ -1,49 +0,0 @@ -# nf-core/nascent: Reference Genomes Configuration - -The nf-core/nascent pipeline needs a reference genome for alignment and annotation. - -These paths can be supplied on the command line at run time (see the [usage docs](../usage.md)), -but for convenience it's often better to save these paths in a nextflow config file. -See below for instructions on how to do this. -Read [Adding your own system](adding_your_own.md) to find out how to set up custom config files. - -## Adding paths to a config file -Specifying long paths every time you run the pipeline is a pain. -To make this easier, the pipeline comes configured to understand reference genome keywords which correspond to preconfigured paths, meaning that you can just specify `--genome ID` when running the pipeline. - -Note that this genome key can also be specified in a config file if you always use the same genome. - -To use this system, add paths to your config file using the following template: - -```nextflow -params { - genomes { - 'YOUR-ID' { - fasta = '/genome.fa' - } - 'OTHER-GENOME' { - // [..] - } - } - // Optional - default genome. Ignored if --genome 'OTHER-GENOME' specified on command line - genome = 'YOUR-ID' -} -``` - -You can add as many genomes as you like as long as they have unique IDs. - -## illumina iGenomes -To make the use of reference genomes easier, illumina has developed a centralised resource called [iGenomes](https://support.illumina.com/sequencing/sequencing_software/igenome.html). -Multiple reference index types are held together with consistent structure for multiple genomes. - -We have put a copy of iGenomes up onto AWS S3 hosting and this pipeline is configured to use this by default. -The hosting fees for AWS iGenomes are currently kindly funded by a grant from Amazon. 
-The pipeline will automatically download the required reference files when you run the pipeline. -For more information about the AWS iGenomes, see https://ewels.github.io/AWS-iGenomes/ - -Downloading the files takes time and bandwidth, so we recommend making a local copy of the iGenomes resource. -Once downloaded, you can customise the variable `params.igenomes_base` in your custom configuration file to point to the reference location. -For example: -```nextflow -params.igenomes_base = '/path/to/data/igenomes/' -``` diff --git a/docs/installation.md b/docs/installation.md deleted file mode 100644 index 94bd13f0..00000000 --- a/docs/installation.md +++ /dev/null @@ -1,115 +0,0 @@ -# nf-core/nascent: Installation - -To start using the nf-core/nascent pipeline, follow the steps below: - -1. [Install Nextflow](#1-install-nextflow) -2. [Install the pipeline](#2-install-the-pipeline) - * [Automatic](#21-automatic) - * [Offline](#22-offline) - * [Development](#23-development) -3. [Pipeline configuration](#3-pipeline-configuration) - * [Software deps: Docker and Singularity](#31-software-deps-docker-and-singularity) - * [Software deps: Bioconda](#32-software-deps-bioconda) - * [Configuration profiles](#33-configuration-profiles) -4. [Reference genomes](#4-reference-genomes) -5. [Appendices](#appendices) - * [Running on UPPMAX](#running-on-uppmax) - -## 1) Install NextFlow -Nextflow runs on most POSIX systems (Linux, Mac OSX etc). It can be installed by running the following commands: - -```bash -# Make sure that Java v8+ is installed: -java -version - -# Install Nextflow -curl -fsSL get.nextflow.io | bash - -# Add Nextflow binary to your PATH: -mv nextflow ~/bin/ -# OR system-wide installation: -# sudo mv nextflow /usr/local/bin -``` - -See [nextflow.io](https://www.nextflow.io/) for further instructions on how to install and configure Nextflow. - -## 2) Install the pipeline - -#### 2.1) Automatic -This pipeline itself needs no installation - NextFlow will automatically fetch it from GitHub if `nf-core/nascent` is specified as the pipeline name. - -#### 2.2) Offline -The above method requires an internet connection so that Nextflow can download the pipeline files. If you're running on a system that has no internet connection, you'll need to download and transfer the pipeline files manually: - -```bash -wget https://github.com/nf-core/nascent/archive/master.zip -mkdir -p ~/my-pipelines/nf-core/ -unzip master.zip -d ~/my-pipelines/nf-core/ -cd ~/my_data/ -nextflow run ~/my-pipelines/nf-core/nascent-master -``` - -To stop nextflow from looking for updates online, you can tell it to run in offline mode by specifying the following environment variable in your ~/.bashrc file: - -```bash -export NXF_OFFLINE='TRUE' -``` - -#### 2.3) Development - -If you would like to make changes to the pipeline, it's best to make a fork on GitHub and then clone the files. Once cloned you can run the pipeline directly as above. - - -## 3) Pipeline configuration -By default, the pipeline runs with the `standard` configuration profile. This uses a number of sensible defaults for process requirements and is suitable for running on a simple (if powerful!) basic server. You can see this configuration in [`conf/base.config`](../conf/base.config). - -Be warned of two important points about this default configuration: - -1. The default profile uses the `local` executor - * All jobs are run in the login session. If you're using a simple server, this may be fine. 
If you're using a compute cluster, this is bad as all jobs will run on the head node. - * See the [nextflow docs](https://www.nextflow.io/docs/latest/executor.html) for information about running with other hardware backends. Most job scheduler systems are natively supported. -2. Nextflow will expect all software to be installed and available on the `PATH` - -#### 3.1) Software deps: Docker -First, install docker on your system: [Docker Installation Instructions](https://docs.docker.com/engine/installation/) - -Then, running the pipeline with the option `-profile standard,docker` tells Nextflow to enable Docker for this run. An image containing all of the software requirements will be automatically fetched and used from dockerhub (https://hub.docker.com/r/nfcore/nascent). - -#### 3.1) Software deps: Singularity -If you're not able to use Docker then [Singularity](http://singularity.lbl.gov/) is a great alternative. -The process is very similar: running the pipeline with the option `-profile standard,singularity` tells Nextflow to enable singularity for this run. An image containing all of the software requirements will be automatically fetched and used from singularity hub. - -If running offline with Singularity, you'll need to download and transfer the Singularity image first: - -```bash -singularity pull --name nf-core-nascent.simg shub://nf-core/nascent -``` - -Once transferred, use `-with-singularity` and specify the path to the image file: - -```bash -nextflow run /path/to/nf-core-nascent -with-singularity nf-core-nascent.simg -``` - -Remember to pull updated versions of the singularity image if you update the pipeline. - - -#### 3.2) Software deps: conda -If you're not able to use Docker _or_ Singularity, you can instead use conda to manage the software requirements. -This is slower and less reproducible than the above, but is still better than having to install all requirements yourself! -The pipeline ships with a conda environment file and nextflow has built-in support for this. -To use it first ensure that you have conda installed (we recommend [miniconda](https://conda.io/miniconda.html)), then follow the same pattern as above and use the flag `-profile standard,conda` - - -## Appendices - -#### Running on UPPMAX -To run the pipeline on the [Swedish UPPMAX](https://www.uppmax.uu.se/) clusters (`rackham`, `irma`, `bianca` etc), use the command line flag `-profile uppmax`. This tells Nextflow to submit jobs using the SLURM job executor with Singularity for software dependencies. - -Note that you will need to specify your UPPMAX project ID when running a pipeline. To do this, use the command line flag `--project `. The pipeline will exit with an error message if you try to run it pipeline with the default UPPMAX config profile without a project. - -**Optional Extra:** To avoid having to specify your project every time you run Nextflow, you can add it to your personal Nextflow config file instead. Add this line to `~/.nextflow/config`: - -```nextflow -params.project = 'project_ID' // eg. b2017123 -``` diff --git a/docs/output.md b/docs/output.md index 2f3961a0..e017e7f2 100644 --- a/docs/output.md +++ b/docs/output.md @@ -2,32 +2,60 @@ This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline. 
- - ## Pipeline overview The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps: +* [fastq-dump](#fastqdump) - if needed, extract the fastq file[s] from a sample +* [SeqKit/bbduk](#seqkitbbduk) - flip reads (experiment specific) & trim reads for adapters/quality/length * [FastQC](#fastqc) - read quality control * [MultiQC](#multiqc) - aggregate report, describing results of the whole pipeline +* [HISAT2](#hisat2) - map reads to the reference genome +* [Samtools](#samtools) - convert the mapped reads from SAM to BAM format +* [Preseq](#preseq) - estimate complexity of the sample +* [RSeQC](#rseqc) - analyze read distributions, infer experiment (SE/PE, whether reads need to be flipped), & read duplication +* [BBMap](#pileup) - analyze coverage +* [bedtools](#bedtools) - create both normalized and non-normalized coverage files in bedGraph format +* [igvtools](#igvtools) - create compressed files to visualize the sample in the Integrative Genomics Viewer ([IGV](http://software.broadinstitute.org/software/igv/home)) + + +## fastqdump +[fastq-dump](https://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?view=toolkit_doc&f=fastq-dump) decompresses an SRR file obtained from the Gene Expression Omnibus ([GEO](https://www.ncbi.nlm.nih.gov/geo/)) database. This will produce one or two fastq files (in the case of paired-end reads). + +**Output directory: `results/fastq-dump`** + +* `sample.fastq` + * FastQ file to process, from the corresponding sample. + + +## seqkit & bbduk +[SeqKit](https://bioinf.shenwei.me/seqkit/) is a toolkit for fasta and fastq file manipulation, used in the pipeline if the positive/negative strands need to be flipped (dependent on library prep protocol). [BBDuk](https://www.geneious.com/plugins/bbduk/) is a trimming tool used to filter reads for adapters, read quality, and overall length after adapter removal. + +**Output directory: `results/bbduk, qc/trimstats`** + +* `sample.trim.fastq` + * Trimmed FastQ file for each sample. +* `{refstats,trimstats,ehist}.txt` + * Trimming details including adapters removed, percentages of reads removed that did not meet minimum quality/length + ## FastQC [FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your reads. It provides information about the quality score distribution across your reads, the per base sequence content (%T/A/G/C). You get information about adapter contamination and other overrepresented sequences. For further reading and documentation see the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/). -> **NB:** The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality. To see how your reads look after trimming, look at the FastQC reports in the `trim_galore` directory. +> **NB:** The FastQC plots displayed in the MultiQC report show both untrimmed and trimmed reads. -**Output directory: `results/fastqc`** +**Output directory: `results/qc`** * `sample_fastqc.html` - * FastQC report, containing quality metrics for your untrimmed raw fastq files + * FastQC report, containing quality metrics for your untrimmed raw fastq files & trimmed fastq files * `zips/sample_fastqc.zip` * zip file containing the FastQC report, tab-delimited data file and plot images ## MultiQC -[MultiQC](http://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project.
Most of the pipeline QC results are visualised in the report and further statistics are available in within the report data directory. +[MultiQC](https://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available within the report data directory. The pipeline has special steps which allow the software versions used to be reported in the MultiQC output for future traceability. @@ -38,4 +66,104 @@ The pipeline has special steps which allow the software versions used to be repo * `Project_multiqc_data/` * Directory containing parsed statistics from the different tools used in the pipeline -For more information about how to use MultiQC reports, see http://multiqc.info +For more information about how to use MultiQC reports, see [https://multiqc.info](https://multiqc.info) + + +## hisat2 +[HISAT2](https://ccb.jhu.edu/software/hisat2/index.shtml) is a sequence alignment tool to map the trimmed sequenced reads to the corresponding reference genome. Due to their size, the resulting SAM files are not retained after the pipeline has completed execution. + +If the necessary indices for mapping are not provided/present, a separate process will build them first. This step can take a few minutes; however, it should only be executed once. + +## samtools +[Samtools](http://www.htslib.org/) is a suite of tools to handle format conversions, among other things, for high-throughput sequencing data. We also use Samtools to generate the list of chromosome sizes, if not provided for the desired reference genome. + +**Output directory: `results/mapped/bams`** + +* `sample.trim.sorted.bam` + * Mapped sample in BAM format +* `sample.trim.sorted.bam.bai` + * Index for the `sample.trim.sorted.bam` mapped sample in BAM format + +**Output directory: `results/qc/mapstats`** + +* `sample.trim.sorted.bam.flagstat` + * Overall mapping statistics +* `sample.trim.sorted.bam.millionsmapped` + * File that contains the number of uniquely mapped reads (not total multi-mapped). Used in normalization + + +## preseq +[Preseq](http://smithlabresearch.org/software/preseq/) plots the estimated complexity of a sample, and estimates future yields for complexity if the sample is sequenced at higher read depths. + +**Output directory: `results/qc/preseq`** + +* `sample.trim.c_curve.txt` + * Curve generated based on number of unique reads vs. total reads sequenced +* `sample.trim.lc_extrap.txt` + * Extrapolation of the c_curve that attempts to model the predicted number of unique reads if the sample was sequenced to a greater depth + + +## rseqc +[RSeQC](http://dldcc-web.brc.bcm.edu/lilab/liguow/CGI/rseqc/_build/html/) provides a number of useful modules that can comprehensively evaluate high throughput sequence data. We use it in this pipeline to analyze read distributions. + +**Output directory: `results/qc/rseqc`** + +* `sample.trim.read_dist.txt` + * Distribution of reads relative to a gene reference file + + +## pileup +[BBMap](https://github.com/BioInfoTools/BBMap/blob/master/sh/pileup.sh) includes a tool called `pileup`, which analyzes the sequencing coverage for each sample. + +**Output directory: `results/qc/pileup`** + +* `sample.trim.coverage.hist.txt` + * Histogram of read coverage over each chromosome +* `sample.trim.coverage.stats.txt` + * Coverage stats broken down by chromosome including %GC, pos/neg read coverage, total coverage, etc.
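As a rough standalone sketch of this coverage step (assuming BBMap's `pileup.sh` is on the `PATH`, and using the BAM naming described above):

```bash
# Per-chromosome coverage stats plus a coverage histogram from the sorted BAM
pileup.sh in=sample.trim.sorted.bam \
    out=sample.trim.coverage.stats.txt \
    hist=sample.trim.coverage.hist.txt
```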
+ + +## bedtools +[bedtools](https://bedtools.readthedocs.io/en/latest/) is an extensive toolkit for BED and bedGraph format manipulation, like sorting, intersecting and joining these files. The files produced here can later be processed with [Tfit](https://github.com/Dowell-Lab/Tfit) or [dREG](https://github.com/Danko-Lab/dREG) to find regions of active transcription and transcription regulatory elements. + +**Output directory: `results/mapped/bedgraphs`** + +* `sample.trim.bedGraph` + * Sample coverage file in bedGraph format +* `sample.trim.pos.bedGraph` + * Sample coverage file (positive strand only) in bedGraph format +* `sample.trim.neg.bedGraph` + * Sample coverage file (negative strand only) in bedGraph format + +**Output directory: `results/mapped/rcc_bedgraphs`** + +* `sample.trim.rcc.bedGraph` + * Normalized sample coverage file in bedGraph format +* `sample.pos.trim.rcc.bedGraph` + * Normalized sample coverage file (positive strand only) in bedGraph format +* `sample.neg.trim.rcc.bedGraph` + * Normalized sample coverage file (negative strand only) in bedGraph format + +**Output directory: `results/mapped/dreg_input`** + +* `sample.trim.pos.rcc.bw` + * Sample coverage file (positive strand only) in BigWig format +* `sample.trim.neg.rcc.bw` + * Sample coverage file (negative strand only) in BigWig format + +**Output directory: `results/mapped/rcc_bigwig`** + +* `sample.trim.pos.rcc.bw` + * Normalized sample coverage file (positive strand only) in BigWig format +* `sample.trim.neg.rcc.bw` + * Normalized sample coverage file (negative strand only) in BigWig format + + +## igvtools +[igvtools](https://software.broadinstitute.org/software/igv/igvtools) is a command-line tool we use to produce a compressed version of the sample coverage file in order to visualize it on IGV more efficiently (with a significantly smaller memory footprint). + +**Output directory: `results/mapped/tdfs`** + +* `sample.trim.rpkm.tdf` + * Sample coverage file in TDF format + diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md deleted file mode 100644 index bbf339a9..00000000 --- a/docs/troubleshooting.md +++ /dev/null @@ -1,28 +0,0 @@ -# nf-core/nascent: Troubleshooting - -## Input files not found - -If only no file, only one input file , or only read one and not read two is picked up then something is wrong with your input file declaration -1. The path must be enclosed in quotes (`'` or `"`) -2. The path must have at least one `*` wildcard character. This is even if you are only running one paired end sample. -3. When using the pipeline with paired end data, the path must use `{1,2}` or `{R1,R2}` notation to specify read pairs. -4. If you are running Single end data make sure to specify `--singleEnd` - -If the pipeline can't find your files then you will get the following error - -``` -ERROR ~ Cannot find any reads matching: *{1,2}.fastq.gz -``` - -Note that if your sample name is "messy" then you have to be very particular with your glob specification. A file name like `L1-1-D-2h_S1_L002_R1_001.fastq.gz` can be difficult enough for a human to read. Specifying `*{1,2}*.gz` wont work give you what you want Whilst `*{R1,R2}*.gz` will. - -## Data organization -The pipeline can't take a list of multiple input files - it takes a glob expression. If your input files are scattered in different paths then we recommend that you generate a directory with symlinked files. If running in paired end mode please make sure that your files are sensibly named so that they can be properly paired.
See the previous point. - -## Extra resources and getting help -If you still have an issue with running the pipeline then feel free to contact us. -Have a look at the [pipeline website](https://github.com/nf-core/nascent) to find out how. - -If you have problems that are related to Nextflow and not our pipeline then check out the [Nextflow gitter channel](https://gitter.im/nextflow-io/nextflow) or the [google group](https://groups.google.com/forum/#!forum/nextflow). diff --git a/docs/usage.md b/docs/usage.md index c5611d34..4ec32ec3 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -1,13 +1,12 @@ # nf-core/nascent: Usage ## Table of contents - * [Introduction](#general-nextflow-info) * [Running the pipeline](#running-the-pipeline) * [Updating the pipeline](#updating-the-pipeline) * [Reproducibility](#reproducibility) * [Main arguments](#main-arguments) - * [`-profile`](#-profile-single-dash) + * [`-profile`](#-profile-single-dash) * [`docker`](#docker) * [`awsbatch`](#awsbatch) * [`standard`](#standard) @@ -17,28 +16,31 @@ * [`--reads`](#--reads) * [`--singleEnd`](#--singleend) * [Reference Genomes](#reference-genomes) - * [`--genome`](#--genome) - * [`--fasta`](#--fasta) + * [`--genome`](#--genome) + * [`--fasta`](#--fasta) * [Job Resources](#job-resources) * [Automatic resubmission](#automatic-resubmission) * [Custom resource requests](#custom-resource-requests) * [AWS batch specific parameters](#aws-batch-specific-parameters) - * [`-awsbatch`](#-awsbatch) - * [`--awsqueue`](#--awsqueue) - * [`--awsregion`](#--awsregion) + * [`-awsbatch`](#-awsbatch) + * [`--awsqueue`](#--awsqueue) + * [`--awsregion`](#--awsregion) * [Other command line parameters](#other-command-line-parameters) - * [`--outdir`](#--outdir) - * [`--email`](#--email) - * [`-name`](#-name-single-dash) - * [`-resume`](#-resume-single-dash) - * [`-c`](#-c-single-dash) - * [`--max_memory`](#--max_memory) - * [`--max_time`](#--max_time) - * [`--max_cpus`](#--max_cpus) - * [`--plaintext_emails`](#--plaintext_emails) - * [`--sampleLevel`](#--sampleLevel) - * [`--multiqc_config`](#--multiqc_config) - + * [`--outdir`](#--outdir) + * [`--email`](#--email) + * [`-name`](#-name-single-dash) + * [`-resume`](#-resume-single-dash) + * [`-c`](#-c-single-dash) + * [`--max_memory`](#--max_memory) + * [`--max_time`](#--max_time) + * [`--max_cpus`](#--max_cpus) + * [`--plaintext_emails`](#--plaintext_emails) + * [`--sampleLevel`](#--sampleLevel) + * [`--multiqc_config`](#--multiqc_config) + * [`--chrom_sizes`](#--chrom_sizes) + * [`--hisat_indices`](#--hisat_indices) + * [`--genome_refseq`](#--genome_refseq) + * [`--sras`](#--sras) ## General Nextflow info Nextflow handles job submissions on SLURM or other environments, and supervises running the jobs. Thus the Nextflow process must run until the pipeline is finished. We recommend that you put the process running in the background through `screen` / `tmux` or similar tool. Alternatively you can run nextflow within a cluster job submitted your job scheduler. @@ -51,6 +53,7 @@ NXF_OPTS='-Xms1g -Xmx4g' ## Running the pipeline The typical command for running the pipeline is as follows: + ```bash nextflow run nf-core/nascent --reads '*_R{1,2}.fastq.gz' -profile standard,docker ``` @@ -84,35 +87,23 @@ This version number will be logged in reports when you run the pipeline, so that ## Main Arguments ### `-profile` -Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. 
Note that multiple profiles can be loaded, for example: `-profile standard,docker` - the order of arguments is important! +Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. -* `standard` - * The default profile, used if `-profile` is not specified at all. - * Runs locally and expects all software to be installed and available on the `PATH`. * `docker` - * A generic configuration profile to be used with [Docker](http://docker.com/) - * Pulls software from dockerhub: [`nfcore/nascent`](http://hub.docker.com/r/nfcore/nascent/) + * A generic configuration profile to be used with [Docker](http://docker.com/) + * Pulls software from dockerhub: [`nfcore/nascent`](http://hub.docker.com/r/nfcore/nascent/) * `singularity` - * A generic configuration profile to be used with [Singularity](http://singularity.lbl.gov/) - * Pulls software from singularity-hub + * A generic configuration profile to be used with [Singularity](http://singularity.lbl.gov/) + * Pulls software from singularity-hub * `conda` - * A generic configuration profile to be used with [conda](https://conda.io/docs/) - * Pulls most software from [Bioconda](https://bioconda.github.io/) -* `binac` - * A profile for the [BinAC](https://www.bwhpc-c5.de/wiki/index.php/Category:BwForCluster_BinAC) cluster - * Pulls images via Singularity from Dockerhub automatically -* `cfc` - * A profile for the Core Facility Cluster at QBiC Tuebingen - * Pulls images via Singularity from Dockerhub automatically + * A generic configuration profile to be used with [conda](https://conda.io/docs/) + * Pulls most software from [Bioconda](https://bioconda.github.io/) * `awsbatch` - * A generic configuration profile to be used with AWS Batch. + * A generic configuration profile to be used with AWS Batch. * `test` - * A profile with a complete configuration for automated testing - * Includes links to test data so needs no other parameters -* `none` - * No configuration at all. Useful if you want to build your own config from scratch and want to avoid loading in the default `base` config profile (not recommended). + * A profile with a complete configuration for automated testing + * Includes links to test data so needs no other parameters - ### `--reads` Use this to specify the location of your input FastQ files. For example: @@ -138,7 +129,7 @@ By default, the pipeline expects paired-end data. If you have single-end data, y It is not possible to run a mixture of single-end and paired-end files in one run. -## Reference Genomes +## Reference genomes The pipeline config files come bundled with paths to the illumina iGenomes reference index files. If running with docker or AWS, the configuration is set up to use the [AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) resource. @@ -162,7 +153,6 @@ Note that you can use the same configuration setup to save sets of reference fil The syntax for this reference configuration is as follows: - ```nextflow params { genomes { @@ -174,7 +164,6 @@ params { } ``` - ### `--fasta` If you prefer, you can specify the full path to your reference genome when you run the pipeline: @@ -182,12 +171,19 @@ If you prefer, you can specify the full path to your reference genome when you r --fasta '[path to Fasta reference]' ``` -## Job Resources +### `--igenomesIgnore` +Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`. 
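For example, a run against a custom reference FASTA that skips the iGenomes config entirely might look like this (paths are illustrative):

```bash
# Use a local FASTA and ignore the bundled iGenomes configuration
nextflow run nf-core/nascent \
    --reads '*_R{1,2}.fastq.gz' \
    --fasta '/refs/my_genome.fa' \
    --igenomesIgnore \
    -profile docker
```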
+ +## Job resources ### Automatic resubmission Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped. ### Custom resource requests -Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files in [`conf`](../conf) for examples. +Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files hosted at [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples. + +If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this, please test that the config file works with your pipeline of choice using the `-c` parameter (see definition below). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile. + +If you have any questions or issues please send us a message on [Slack](https://nf-core-invite.herokuapp.com/). ## AWS Batch specific parameters Running the pipeline on AWS Batch requires a couple of specific parameters to be set according to your AWS Batch configuration. Please use the `-awsbatch` profile and then specify all of the following parameters. @@ -200,13 +196,11 @@ Please make sure to also set the `-w/--work-dir` and `--outdir` parameters to a ## Other command line parameters - - ### `--outdir` The output directory where the results will be saved. ### `--email` -Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to speicfy this on the command line for every run. +Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to specify this on the command line for every run. ### `-name` Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. @@ -229,9 +223,37 @@ Specify the path to a specific config file (this is a core NextFlow command). Note - you can use this to override pipeline defaults. +### `--custom_config_version` +Provide git commit id for custom institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default is set to `master`. + +```bash +## Download and use config file with the following git commit id +--custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96 +``` + +### `--custom_config_base` +If you're running offline, nextflow will not be able to fetch the institutional config files from the internet. If you don't need them, then this is not a problem.
If you do need them, you should download the files from the repo and tell nextflow where to find them with the `custom_config_base` option. For example: + +```bash +## Download and unzip the config files +cd /path/to/my/configs +wget https://github.com/nf-core/configs/archive/master.zip +unzip master.zip + +## Run the pipeline +cd /path/to/my/data +nextflow run /path/to/pipeline/ --custom_config_base /path/to/my/configs/configs-master/ +``` + +> Note that the nf-core/tools helper package has a `download` command to download all required pipeline > files + singularity containers + institutional configs in one go for you, to make this process easier. + ### `--max_memory` Use to set a top-limit for the default memory requirement for each process. -Should be a string in the format integer-unit. eg. `--max_memory '8.GB'`` +Should be a string in the format integer-unit. eg. `--max_memory '8.GB'` ### `--max_time` Use to set a top-limit for the default time requirement for each process. @@ -244,5 +266,20 @@ Should be a string in the format integer-unit. eg. `--max_cpus 1` ### `--plaintext_email` Set to receive plain-text e-mails instead of HTML formatted. -### `--multiqc_config` +### `--monochrome_logs` +Set to disable colourful command line output and live life in monochrome. + +### `--multiqc_config` Specify a path to a custom MultiQC configuration file. + +### `--chrom_sizes` +Specify a path to a file listing the number of nucleotides on each chromosome, for the reference genome in question. + +### `--hisat_indices` +Specify a path to the HISAT2 index directory. If not provided, these indices will be generated the first time this pipeline is executed. + +### `--genome_refseq` +Specify a path to the RefSeq genome annotation file. Optional, but useful to collect stats via RSeQC. + +### `--sras` +Specify a path to a directory (can use regular expressions) containing SRR files obtained from the Gene Expression Omnibus (GEO) platform. This is an alternative to providing fastq files if re-analyzing existing public datasets. diff --git a/environment.yml b/environment.yml index c6cb7276..3f2240bc 100644 --- a/environment.yml +++ b/environment.yml @@ -4,15 +4,15 @@ channels: - bioconda - defaults dependencies: - - fastqc=0.11.5 - - multiqc=1.6 + - fastqc=0.11.8 + - multiqc=1.7 - hisat2=2.1.0 - - samtools=1.8 + - samtools=1.9 - preseq=2.0.3 - - seqkit=0.9.0 - - bedtools=2.25.0 - - igvtools=2.3.75 - - bbmap=38.06 + - seqkit=0.10.1 + - bedtools=2.28.0 + - igvtools=2.3.93 + - bbmap=38.22 - fastx_toolkit=0.0.14 - sra-tools=2.9.1 - rseqc=3.0.0 diff --git a/main.nf b/main.nf index 9e38f5fe..0cb4009c 100644 --- a/main.nf +++ b/main.nf @@ -11,7 +11,6 @@ Ignacio Tripodi Margaret Gruca ======================================================================================== -======================================================================================== Pipeline steps: @@ -45,32 +44,25 @@ Pipeline steps: 9. Pipeline report +======= */ def helpMessage() { + log.info nfcoreHeader() log.info""" - =======================================================
diff --git a/environment.yml b/environment.yml
index c6cb7276..3f2240bc 100644
--- a/environment.yml
+++ b/environment.yml
@@ -4,15 +4,15 @@ channels:
  - bioconda
  - defaults
dependencies:
-  - fastqc=0.11.5
-  - multiqc=1.6
+  - fastqc=0.11.8
+  - multiqc=1.7
  - hisat2=2.1.0
-  - samtools=1.8
+  - samtools=1.9
  - preseq=2.0.3
-  - seqkit=0.9.0
-  - bedtools=2.25.0
-  - igvtools=2.3.75
-  - bbmap=38.06
+  - seqkit=0.10.1
+  - bedtools=2.28.0
+  - igvtools=2.3.93
+  - bbmap=38.22
  - fastx_toolkit=0.0.14
  - sra-tools=2.9.1
  - rseqc=3.0.0
diff --git a/main.nf b/main.nf
index 9e38f5fe..0cb4009c 100644
--- a/main.nf
+++ b/main.nf
@@ -11,7 +11,6 @@
Ignacio Tripodi
Margaret Gruca
========================================================================================
-========================================================================================

Pipeline steps:

@@ -45,32 +44,25 @@ Pipeline steps:

9. Pipeline report

*/

def helpMessage() {
+    log.info nfcoreHeader()
    log.info"""
-    =======================================================
-     ,--./,-.
-     ___     __   __   __   ___     /,-._.--~\'
-    |\\ | |__  __ /  ` /  \\ |__) |__         }  {
-    | \\| |       \\__, \\__/ |  \\ |___     \\`-._,-`-,
-                                          `._,._,\'
-
-    nf-core/nascent v${workflow.manifest.version}
-    =======================================================

    Usage:

    The typical command for running the pipeline is as follows:
-    nextflow run nf-core/nascent -profile slurm --fastqs '/project/*_{R1,R2}*.fastq' --outdir '/project/'
+    nextflow run nf-core/nascent -profile slurm --reads '/project/*_{R1,R2}*.fastq' --outdir '/project/'
    nextflow run nf-core/nascent --reads '*_R{1,2}.fastq.gz' -profile standard,docker

    Required arguments:
    -profile                      Configuration profile to use.
-    --fastqs                      Directory pattern for fastq files: /project/*{R1,R2}*.fastq (Required if --sras not specified)
-    --sras                        Directory pattern for SRA files: /project/*.sras (Required if --fastqs not specified)
+    --reads                       Directory pattern for fastq files: /project/*{R1,R2}*.fastq (Required if --sras not specified)
+    --sras                        Directory pattern for SRA files: /project/*.sras (Required if --reads not specified)
    --workdir                     Nextflow working directory where all intermediate files are saved.
    --email                       Where to send workflow report email.

@@ -100,8 +92,7 @@
 * SET UP CONFIGURATION VARIABLES
 */

-// Show help message
-params.help = false
+// Show help message
if (params.help){
    helpMessage()
    exit 0
}
@@ -117,8 +108,10 @@ params.bedGraphToBigWig = "$baseDir/bin/bedGraphToBigWig"
params.rcc = "$baseDir/bin/rcc.py"
params.workdir = "./nextflowTemp"

-multiqc_config = file(params.multiqc_config)
-output_docs = file("$baseDir/docs/output.md")
+
+// Stage config files
+ch_multiqc_config = Channel.fromPath(params.multiqc_config)
+ch_output_docs = Channel.fromPath("$baseDir/docs/output.md")

// Validate inputs

@@ -172,43 +165,52 @@ if( !(workflow.runName ==~ /[a-z]+_[a-z]+/) ){
}

if( workflow.profile == 'awsbatch') {
-    // AWSBatch sanity checking
-    if (!params.awsqueue || !params.awsregion) exit 1, "Specify correct --awsqueue and --awsregion parameters on AWSBatch!"
-    if (!workflow.workDir.startsWith('s3') || !params.outdir.startsWith('s3')) exit 1, "Specify S3 URLs for workDir and outdir parameters on AWSBatch!"
-    // Check workDir/outdir paths to be S3 buckets if running on AWSBatch
+    // Check outdir paths to be S3 buckets if running on AWSBatch
    // related: https://github.com/nextflow-io/nextflow/issues/813
-    if (!workflow.workDir.startsWith('s3:') || !params.outdir.startsWith('s3:')) exit 1, "Workdir or Outdir not on S3 - specify S3 Buckets for each to run on AWSBatch!"
+    if (!params.outdir.startsWith('s3:')) exit 1, "Outdir not on S3 - specify an S3 bucket to run on AWSBatch!"
+    // Prevent trace files from being stored on S3, since S3 does not support rolling files.
+    if (workflow.tracedir.startsWith('s3:')) exit 1, "Specify a local tracedir or run without trace! S3 cannot be used for trace files."
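+    // For reference, a configuration that passes these checks might combine
+    // (queue and bucket names below are hypothetical):
+    //   --awsqueue my-batch-queue --awsregion eu-west-1 --outdir s3://my-bucket/results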
}

/*
 * Create a channel for input read files
 */
-if (params.fastqs) {
+
+if(params.readPaths){
+    if(params.singleEnd){
+        Channel
+            .from(params.readPaths)
+            .map { row -> [ row[0], [file(row[1][0])]] }
+            .dump()
+            .ifEmpty { exit 1, "params.readPaths was empty - no input files supplied" }
+            .into { fastq_reads_qc; fastq_reads_trim; fastq_reads_gzip }
+    } else {
+        Channel
+            .from(params.readPaths)
+            .map { row -> [ row[0], [file(row[1][0]), file(row[1][1])]] }
+            .ifEmpty { exit 1, "params.readPaths was empty - no input files supplied" }
+            .dump()
+            .into { fastq_reads_qc; fastq_reads_trim; fastq_reads_gzip }
+    }
+} else {
    if (params.singleEnd) {
        fastq_reads_qc = Channel
-            .fromPath(params.fastqs)
+            .fromPath(params.reads)
            .map { file -> tuple(file.baseName, file) }
        fastq_reads_trim = Channel
-            .fromPath(params.fastqs)
+            .fromPath(params.reads)
            .map { file -> tuple(file.baseName, file) }
        fastq_reads_gzip = Channel
-            .fromPath(params.fastqs)
+            .fromPath(params.reads)
            .map { file -> tuple(file.baseName, file) }
    } else {
        Channel
-            .fromFilePairs( params.fastqs, size: params.singleEnd ? 1 : 2 )
+            .fromFilePairs( params.reads, size: params.singleEnd ? 1 : 2 )
            .ifEmpty { exit 1, "Cannot find any reads matching: ${params.reads}\nNB: Path needs to be enclosed in quotes!\nIf this is single-end data, please specify --singleEnd on the command line." }
            .into { fastq_reads_qc; fastq_reads_trim; fastq_reads_gzip }
    }
}

-else {
-    Channel
-        .empty()
-        .into { fastq_reads_qc; fastq_reads_trim; fastq_reads_gzip }
-    params.fastqs = null
-}
-
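+// NB: params.readPaths is normally only defined by test profiles (e.g. conf/test.config),
+// which list small example files explicitly; regular runs supply --reads or --sras instead.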
if (params.sras) {
    if (params.singleEnd) {
        println("Pattern for SRAs provided")
@@ -225,26 +227,15 @@
else {
    read_files_sra = Channel.empty()
-}
-
+}

// Header log info
-log.info """=======================================================
-     ,--./,-.
-     ___     __   __   __   ___     /,-._.--~\'
-    |\\ | |__  __ /  ` /  \\ |__) |__         }  {
-    | \\| |       \\__, \\__/ |  \\ |___     \\`-._,-`-,
-                                          `._,._,\'
-
-nf-core/nascent v${workflow.manifest.version}"
-======================================================="""
+log.info nfcoreHeader()
def summary = [:]
-summary['Pipeline Name'] = 'nf-core/nascent'
-summary['Pipeline Version'] = workflow.manifest.version
-summary['Run Name'] = custom_runName ?: workflow.runName
+if(workflow.revision) summary['Pipeline Release'] = workflow.revision
+summary['Run Name'] = custom_runName ?: workflow.runName
summary['Save Reference'] = params.saveReference ? 'Yes' : 'No'
-if(params.reads) summary['Reads'] = params.reads
-if(params.fastqs) summary['Fastqs'] = params.fastqs
+if(params.reads) summary['Fastqs'] = params.reads
if(params.sras) summary['SRAs'] = params.sras
summary['Genome Ref'] = params.fasta
summary['Thread fqdump'] = params.threadfqdump ? 'YES' : 'NO'
@@ -254,28 +245,30 @@
summary['Save fastq'] = params.savefq ? 'YES' : 'NO'
summary['Save Trimmed'] = params.saveTrim ? 'YES' : 'NO'
summary['Reverse Comp'] = params.flip ? 'YES' : 'NO'
summary['Run MultiQC'] = params.skipMultiQC ? 'NO' : 'YES'
-summary['Max Memory'] = params.max_memory
-summary['Max CPUs'] = params.max_cpus
-summary['Max Time'] = params.max_time
+summary['Max Resources'] = "$params.max_memory memory, $params.max_cpus cpus, $params.max_time time per job"
+if(workflow.containerEngine) summary['Container'] = "$workflow.containerEngine - $workflow.container"
summary['Output dir'] = params.outdir
+summary['Launch dir'] = workflow.launchDir
summary['Working dir'] = workflow.workDir
-summary['Container Engine'] = workflow.containerEngine
-if(workflow.containerEngine) summary['Container'] = workflow.container
-summary['Current home'] = "$HOME"
-summary['Current user'] = "$USER"
-summary['Current path'] = "$PWD"
-summary['Working dir'] = workflow.workDir
-summary['Output dir'] = params.outdir
-summary['Script dir'] = workflow.projectDir
-summary['Config Profile'] = workflow.profile
+summary['Script dir'] = workflow.projectDir
+summary['User'] = workflow.userName
if(workflow.profile == 'awsbatch'){
-   summary['AWS Region'] = params.awsregion
-   summary['AWS Queue'] = params.awsqueue
+    summary['AWS Region'] = params.awsregion
+    summary['AWS Queue'] = params.awsqueue
}
-if(params.email) summary['E-mail Address'] = params.email
-log.info summary.collect { k,v -> "${k.padRight(15)}: $v" }.join("\n")
-log.info "========================================="
+summary['Config Profile'] = workflow.profile
+if(params.config_profile_description) summary['Config Description'] = params.config_profile_description
+if(params.config_profile_contact) summary['Config Contact'] = params.config_profile_contact
+if(params.config_profile_url) summary['Config URL'] = params.config_profile_url
+if(params.email) {
+    summary['E-mail Address'] = params.email
+    summary['MultiQC maxsize'] = params.maxMultiqcEmailFileSize
+}
+log.info summary.collect { k,v -> "${k.padRight(18)}: $v" }.join("\n")
+log.info "\033[2m----------------------------------------------------\033[0m"

+// Check the hostnames against configured profiles
+checkHostname()

def create_workflow_summary(summary) {
    def yaml_file = workDir.resolve('workflow_summary_mqc.yaml')
@@ -381,7 +374,6 @@ process sra_dump {

/*
 * PREPROCESSING - Build HISAT2 index (borrowed from nf-core/rnaseq)
 */
-// TODO: do we need --ss and --exon? probably not, need to check what was the actual hisat2-builder arguments used to generate the indices we have on fiji
if(!params.hisat2_indices && params.fasta){
    process make_hisat_index {
        tag "$fasta"
@@ -414,7 +406,6 @@
 */

process fastqc {
-    validExitStatus 0,1
    tag "$prefix"
    publishDir "${params.outdir}/qc/fastqc/", mode: 'copy',
        saveAs: {filename -> filename.indexOf(".zip") > 0 ? "zips/$filename" : "$filename"}
@@ -428,10 +419,7 @@
    script:
    prefix = reads.baseName
    """
-    echo ${prefix}
-
    fastqc $reads
-
    extract_fastqc_stats.sh --srr=${prefix} > ${prefix}_stats_fastqc.txt
    """
}
@@ -477,7 +465,6 @@
        file "*.txt" into trim_stats

    script:
-//    prefix = fastq.baseName
    bbduk_mem = task.memory.toGiga()
    if (!params.singleEnd && params.flip) {
        """
@@ -531,8 +518,7 @@
            refstats=${name}.refstats.txt \
            ehist=${name}.ehist.txt
        """
-    }
-    else if (!params.singleEnd) {
+    } else if (!params.singleEnd) {
        """
        echo ${name}
@@ -632,7 +618,6 @@ process hisat2 {
// NOTE: this tool sends output there even in successful (exit code 0)
// termination, so we have to ignore errors for now, and the next
// process will blow up from missing a SAM file instead.
-    //errorStrategy 'ignore'
    tag "$name"
    validExitStatus 0,143
@@ -698,7 +683,7 @@
    script:
    prefix = mapped_sam.baseName
-// Note that the millionsmapped arugments below are only good for SE data. When PE is added, it will need to be changed to:
+    // Note that the millionsmapped arguments below are only good for SE data. When PE is added, it will need to be changed to:
    // -F 0x40 rootname.sorted.bam | cut -f1 | sort | uniq | wc -l > rootname.bam.millionsmapped
    if (!params.singleEnd) {
        """
@@ -1042,7 +1027,7 @@ process multiqc {
    !params.skipMultiQC

    input:
-    file multiqc_config
+    file multiqc_config from ch_multiqc_config.collect()
    file (fastqc:'qc/fastqc/*') from fastqc_results.collect()
    file ('qc/fastqc/*') from trimmed_fastqc_results.collect()
    file ('qc/trimstats/*') from trim_stats.collect()
@@ -1059,7 +1044,6 @@
    rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
    rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''

-//TO DO : Need to build a new multiqc container for the newest version
    """
    export PATH=~/.local/bin:$PATH

@@ -1097,10 +1081,25 @@ workflow.onComplete {
    if(workflow.repository) email_fields['summary']['Pipeline repository Git URL'] = workflow.repository
    if(workflow.commitId) email_fields['summary']['Pipeline repository Git Commit'] = workflow.commitId
    if(workflow.revision) email_fields['summary']['Pipeline Git branch/tag'] = workflow.revision
+    if(workflow.container) email_fields['summary']['Docker image'] = workflow.container
    email_fields['summary']['Nextflow Version'] = workflow.nextflow.version
    email_fields['summary']['Nextflow Build'] = workflow.nextflow.build
    email_fields['summary']['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp

+    // On success, try to attach the MultiQC report
+    def mqc_report = null
+    try {
+        if (workflow.success) {
+            mqc_report = multiqc_report.getVal()
+            if (mqc_report.getClass() == ArrayList){
+                log.warn "[nf-core/nascent] Found multiple reports from process 'multiqc', will use only one"
+                mqc_report = mqc_report[0]
+            }
+        }
+    } catch (all) {
+        log.warn "[nf-core/nascent] Could not attach MultiQC report to summary email"
+    }
+
    // Render the TXT template
    def engine = new groovy.text.GStringTemplateEngine()
    def tf = new File("$baseDir/assets/email_template.txt")
@@ -1113,7 +1112,7 @@
    def email_html = html_template.toString()

    // Render the sendmail template
-    def smail_fields = [ email: params.email, subject: subject, email_txt: email_txt, email_html: email_html, baseDir: "$baseDir" ]
+    def smail_fields = [ email: params.email, subject: subject, email_txt: email_txt, email_html: email_html, baseDir: "$baseDir", mqcFile: mqc_report, mqcMaxSize: params.maxMultiqcEmailFileSize.toBytes() ]
    def sf = new File("$baseDir/assets/sendmail_template.txt")
    def sendmail_template = engine.createTemplate(sf).make(smail_fields)
    def sendmail_html = sendmail_template.toString()
@@ -1133,7 +1132,7 @@
    }

    // Write summary e-mail HTML to a file
-    def output_d = new File( "${params.outdir}/Documentation/" )
+    def output_d = new File( "${params.outdir}/pipeline_info/" )
    if( !output_d.exists() ) {
        output_d.mkdirs()
    }
@@ -1142,6 +1141,67 @@
    def output_tf = new File( output_d, "pipeline_report.txt" )
    output_tf.withWriter { w -> w << email_txt }

-    log.info "[nf-core/nascent] Pipeline Complete"
+    c_reset = params.monochrome_logs ? '' : "\033[0m";
+    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
+    c_green = params.monochrome_logs ? '' : "\033[0;32m";
+    c_red = params.monochrome_logs ? '' : "\033[0;31m";
+
+    if (workflow.stats.ignoredCountFmt > 0 && workflow.success) {
+        log.info "${c_purple}Warning, pipeline completed, but with errored process(es) ${c_reset}"
+        log.info "${c_red}Number of ignored errored process(es) : ${workflow.stats.ignoredCountFmt} ${c_reset}"
+        log.info "${c_green}Number of successfully run process(es) : ${workflow.stats.succeedCountFmt} ${c_reset}"
+    }
+
+    if(workflow.success){
+        log.info "${c_purple}[nf-core/nascent]${c_green} Pipeline completed successfully${c_reset}"
+    } else {
+        checkHostname()
+        log.info "${c_purple}[nf-core/nascent]${c_red} Pipeline completed with errors${c_reset}"
+    }
+
+}
+
+def nfcoreHeader(){
+    // Log colors ANSI codes
+    c_reset = params.monochrome_logs ? '' : "\033[0m";
+    c_dim = params.monochrome_logs ? '' : "\033[2m";
+    c_black = params.monochrome_logs ? '' : "\033[0;30m";
+    c_green = params.monochrome_logs ? '' : "\033[0;32m";
+    c_yellow = params.monochrome_logs ? '' : "\033[0;33m";
+    c_blue = params.monochrome_logs ? '' : "\033[0;34m";
+    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
+    c_cyan = params.monochrome_logs ? '' : "\033[0;36m";
+    c_white = params.monochrome_logs ? '' : "\033[0;37m";
+
+    return """    ${c_dim}----------------------------------------------------${c_reset}
+                                            ${c_green},--.${c_black}/${c_green},-.${c_reset}
+    ${c_blue}        ___     __   __   __   ___     ${c_green}/,-._.--~\'${c_reset}
+    ${c_blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${c_yellow}}  {${c_reset}
+    ${c_blue}  | \\| |       \\__, \\__/ |  \\ |___     ${c_green}\\`-._,-`-,${c_reset}
+                                            ${c_green}`._,._,\'${c_reset}
+    ${c_purple}  nf-core/nascent v${workflow.manifest.version}${c_reset}
+    ${c_dim}----------------------------------------------------${c_reset}
+    """.stripIndent()
+}
+
+def checkHostname(){
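+    // params.hostnames is expected to map profile names to lists of hostname substrings,
+    // e.g. (hypothetical values): [ big_cluster: ['.cluster.example.edu'] ]; it defaults
+    // to false, so this check is skipped unless an institutional config defines it.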
+    def c_reset = params.monochrome_logs ? '' : "\033[0m"
+    def c_white = params.monochrome_logs ? '' : "\033[0;37m"
+    def c_red = params.monochrome_logs ? '' : "\033[1;91m"
+    def c_yellow_bold = params.monochrome_logs ? '' : "\033[1;93m"
+    if(params.hostnames){
+        def hostname = "hostname".execute().text.trim()
+        params.hostnames.each { prof, hnames ->
+            hnames.each { hname ->
+                if(hostname.contains(hname) && !workflow.profile.contains(prof)){
+                    log.error "============================================================\n" +
+                        "  ${c_red}WARNING!${c_reset} You are running with `-profile $workflow.profile`\n" +
+                        "  but your machine hostname is ${c_white}'$hostname'${c_reset}\n" +
+                        "  ${c_yellow_bold}It's highly recommended that you use `-profile $prof${c_reset}`\n" +
+                        "============================================================"
+                }
+            }
+        }
+    }
}
diff --git a/nextflow.config b/nextflow.config
index 48ec7a2c..c253a402 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -3,88 +3,87 @@
 * nf-core/nascent Nextflow config file
 * -------------------------------------------------
 * Default config options for all environments.
- * Cluster-specific config options should be saved
- * in the conf folder and imported under a profile
- * name here.
 */

// Global default params, used in configs
params {
-
-  // Container slug. Stable releases should specify release tag!
-  // Developmental code should specify :latest
-  container = 'ignaciot/nascent:latest'
-
-  // Workflow flags
-  //reads = "data/*{R1,R2}*.fastq"
-  //fastqs= "data/*{R1,R2}*.fastq"
-  //sras= "data/*{R1,R2}*.sra"
-  outdir = './results'

-// Run arguments
+  // Run arguments
  workdir = false
  clusterOptions = false
  flip = false
-  email=false
-  help = false
-  igenomes_base = "./iGenomes"
-  tracedir = "${params.outdir}/pipeline_info"
-//  nf_required_version = nf_required_version
-  outdir = './results'
-  pairedEnd = false
  saveAllfq = false
  savefq = false
  saveTrim = false
  skipMultiQC = false
  threadfqdump = false
-//  version = version
+
+  // File listing the number of nucleotides per chromosome for the reference genome used.
+  // Will be generated the first time each genome is used to process datasets.
+  chrom_sizes = 0
+
+  // Path to the HISAT2 index directory. If not provided, these indices will be generated
+  // the first time this pipeline is executed.
+  hisat2_indices = 0
+
+  // Path to the RefSeq genome annotation file. Optional, but useful to collect stats via RSeQC.
+  genome_refseq = 0
+
+  // Path to SRR files obtained from the Gene Expression Omnibus (GEO) platform. This is an
+  // alternative to providing fastq files when re-analysing existing public datasets.
+  sras = 0
+
+  reads = "data/*{R1,R2}*.fastq"
+  singleEnd = true
+  outdir = './results'
+
+  // Boilerplate options
+  name = false
+  multiqc_config = "$baseDir/assets/multiqc_config.yaml"
+  email = false
+  maxMultiqcEmailFileSize = 25.MB
+  plaintext_email = false
+  monochrome_logs = false
+  help = false
+  igenomes_base = "./iGenomes"
+  tracedir = "${params.outdir}/pipeline_info"
+  awsqueue = false
+  awsregion = 'eu-west-1'
+  igenomesIgnore = false
+  custom_config_version = 'master'
+  custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
+  hostnames = false
+  config_profile_description = false
+  config_profile_contact = false
+  config_profile_url = false
+}
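+
+// NB: the values above are defaults only. Profiles, institutional configs, files passed
+// with -c, and command-line flags (e.g. --reads) can all override them at run time.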
+
+// Container slug. Stable releases should specify release tag!
+// Developmental code should specify :dev
+process.container = 'nfcore/nascent:1.0'
+
+// Load base.config by default for all pipelines
+includeConfig 'conf/base.config'
+
+// Load nf-core custom profiles from different institutions
+try {
+    includeConfig "${params.custom_config_base}/nfcore_custom.config"
+} catch (Exception e) {
+    System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
}

profiles {
-  awsbatch {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/awsbatch.config'
-    includeConfig 'conf/igenomes.config'
-  }
-  binac {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/binac.config'
-  }
-  cfc {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/cfc.config'
-  }
+  awsbatch { includeConfig 'conf/awsbatch.config' }
  conda { process.conda = "$baseDir/environment.yml" }
  debug { process.beforeScript = 'echo $HOSTNAME' }
-  docker {
-    docker.enabled = true
-    process.container = params.container
-  }
-  none {
-    // Don't load any config (for use with custom home configs)
-  }
-  singularity {
-    singularity.enabled = true
-    process.container = {"shub://${params.container.replace('nfcore', 'nf-core')}"}
-  }
-  standard {
-    includeConfig 'conf/base.config'
-  }
-  test {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/test.config'
-  }
-  uzh {
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/uzh.config'
-  }
-  slurm {
-    workDir = params.tracedir
-    process.executor = 'slurm'
-    process.queue = 'short'
-    includeConfig 'conf/base.config'
-    includeConfig 'conf/slurm.config'
-  }
+  docker { docker.enabled = true }
+  singularity { singularity.enabled = true }
+  test { includeConfig 'conf/test.config' }
+}
+
+// Load igenomes.config if required
+if(!params.igenomesIgnore){
+  includeConfig 'conf/igenomes.config'
}

// Capture exit codes from upstream processes when piping
@@ -92,19 +91,19 @@ process.shell = ['/bin/bash', '-euo', 'pipefail']

timeline {
  enabled = true
-  file = "${params.tracedir}/nf-core/nascent_timeline.html"
+  file = "${params.tracedir}/execution_timeline.html"
}
report {
  enabled = true
-  file = "${params.tracedir}/nf-core/nascent_report.html"
+  file = "${params.tracedir}/execution_report.html"
}
trace {
  enabled = true
-  file = "${params.tracedir}/nf-core/nascent_trace.txt"
+  file = "${params.tracedir}/execution_trace.txt"
}
dag {
  enabled = true
-  file = "${params.tracedir}/nf-core/nascent_dag.svg"
+  file = "${params.tracedir}/pipeline_dag.svg"
}
manifest {