From 3e91dcd324876319a27567413d7f24b4772b0c8b Mon Sep 17 00:00:00 2001
From: nf-core-bot <core@nf-co.re>
Date: Tue, 30 Aug 2022 13:31:08 +0000
Subject: [PATCH] Template update for nf-core/tools version 2.5

---
 .editorconfig                                 |   2 +-
 .github/PULL_REQUEST_TEMPLATE.md              |   3 +-
 .github/workflows/ci.yml                      |  23 ++-----
 .github/workflows/linting.yml                 |  38 ++++++++++--
 CHANGELOG.md                                  |   2 +-
 CITATION.cff                                  |  56 ++++++++++++++++++
 README.md                                     |  21 +++----
 assets/email_template.txt                     |   1 -
 bin/check_samplesheet.py                      |  41 +++++++------
 conf/base.config                              |   5 ++
 docs/images/nf-core-chipseq_logo_dark.png     | Bin 73947 -> 73955 bytes
 docs/usage.md                                 |  12 ++--
 lib/WorkflowChipseq.groovy                    |   5 +-
 lib/WorkflowMain.groovy                       |   9 ++-
 main.nf                                       |   2 +-
 modules.json                                  |  22 ++++---
 .../templates/dumpsoftwareversions.py         |  14 +++--
 nextflow.config                               |  23 ++++++-
 18 files changed, 186 insertions(+), 93 deletions(-)
 create mode 100644 CITATION.cff

diff --git a/.editorconfig b/.editorconfig
index b6b31907..b78de6e6 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -8,7 +8,7 @@ trim_trailing_whitespace = true
 indent_size = 4
 indent_style = space
 
-[*.{md,yml,yaml,html,css,scss,js}]
+[*.{md,yml,yaml,html,css,scss,js,cff}]
 indent_size = 2
 
 # These files are edited and tested upstream in nf-core/modules
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 9595a3a4..8da27af0 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -15,8 +15,8 @@ Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/chip
 
 - [ ] This comment contains a description of changes (with reason).
 - [ ] If you've fixed a bug or added code that should be tested, add tests!
-  - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/chipseq/tree/master/.github/CONTRIBUTING.md)
-  - [ ] If necessary, also make a PR on the nf-core/chipseq _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
+- [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/chipseq/tree/master/.github/CONTRIBUTING.md)
+- [ ] If necessary, also make a PR on the nf-core/chipseq _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
 - [ ] Make sure your code lints (`nf-core lint`).
 - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`).
 - [ ] Usage Documentation in `docs/usage.md` is updated.
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0596f36f..cf99db7f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,6 @@ on:
 
 env:
   NXF_ANSI_LOG: false
-  CAPSULE_LOG: none
 
 jobs:
   test:
@@ -20,27 +19,17 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Nextflow versions
-        include:
-          # Test pipeline minimum Nextflow version
-          - NXF_VER: "21.10.3"
-            NXF_EDGE: ""
-          # Test latest edge release of Nextflow
-          - NXF_VER: ""
-            NXF_EDGE: "1"
+        NXF_VER:
+          - "21.10.3"
+          - "latest-everything"
     steps:
       - name: Check out pipeline code
         uses: actions/checkout@v2
 
       - name: Install Nextflow
-        env:
-          NXF_VER: ${{ matrix.NXF_VER }}
-          # Uncomment only if the edge release is more recent than the latest stable release
-          # See https://github.com/nextflow-io/nextflow/issues/2467
-          # NXF_EDGE: ${{ matrix.NXF_EDGE }}
-        run: |
-          wget -qO- get.nextflow.io | bash
-          sudo mv nextflow /usr/local/bin/
+        uses: nf-core/setup-nextflow@v1
+        with:
+          version: "${{ matrix.NXF_VER }}"
 
       - name: Run pipeline with test data
         # TODO nf-core: You can customise CI pipeline run tests as required
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 77358dee..8a5ce69b 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -35,6 +35,36 @@ jobs:
       - name: Run Prettier --check
         run: prettier --check ${GITHUB_WORKSPACE}
 
+  PythonBlack:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Check code lints with Black
+        uses: psf/black@stable
+
+      # If the above check failed, post a comment on the PR explaining the failure
+      - name: Post PR comment
+        if: failure()
+        uses: mshick/add-pr-comment@v1
+        with:
+          message: |
+            ## Python linting (`black`) is failing
+
+            To keep the code consistent with lots of contributors, we run automated code consistency checks.
+            To fix this CI test, please run:
+
+            * Install [`black`](https://black.readthedocs.io/en/stable/): `pip install black`
+            * Fix formatting errors in your pipeline: `black .`
+
+            Once you push these changes the test should pass, and you can hide this comment :+1:
+
+            We highly recommend setting up Black in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+
+            Thanks again for your contribution!
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          allow-repeats: false
+
   nf-core:
     runs-on: ubuntu-latest
     steps:
@@ -42,15 +72,11 @@ jobs:
         uses: actions/checkout@v2
 
       - name: Install Nextflow
-        env:
-          CAPSULE_LOG: none
-        run: |
-          wget -qO- get.nextflow.io | bash
-          sudo mv nextflow /usr/local/bin/
+        uses: nf-core/setup-nextflow@v1
 
       - uses: actions/setup-python@v3
         with:
-          python-version: "3.6"
+          python-version: "3.7"
           architecture: "x64"
 
       - name: Install dependencies
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bcbe42c1..ddd11538 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,7 +3,7 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## v1.3.0dev - [date]
+## v2.0.0 - [date]
 
 Initial release of nf-core/chipseq, created with the [nf-core](https://nf-co.re/) template.
 
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 00000000..4533e2f2
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,56 @@
+cff-version: 1.2.0
+message: "If you use `nf-core tools` in your work, please cite the `nf-core` publication"
+authors:
+  - family-names: Ewels
+    given-names: Philip
+  - family-names: Peltzer
+    given-names: Alexander
+  - family-names: Fillinger
+    given-names: Sven
+  - family-names: Patel
+    given-names: Harshil
+  - family-names: Alneberg
+    given-names: Johannes
+  - family-names: Wilm
+    given-names: Andreas
+  - family-names: Ulysse Garcia
+    given-names: Maxime
+  - family-names: Di Tommaso
+    given-names: Paolo
+  - family-names: Nahnsen
+    given-names: Sven
+title: "The nf-core framework for community-curated bioinformatics pipelines."
+version: 2.4.1
+doi: 10.1038/s41587-020-0439-x
+date-released: 2022-05-16
+url: https://github.com/nf-core/tools
+preferred-citation:
+  type: article
+  authors:
+    - family-names: Ewels
+      given-names: Philip
+    - family-names: Peltzer
+      given-names: Alexander
+    - family-names: Fillinger
+      given-names: Sven
+    - family-names: Patel
+      given-names: Harshil
+    - family-names: Alneberg
+      given-names: Johannes
+    - family-names: Wilm
+      given-names: Andreas
+    - family-names: Ulysse Garcia
+      given-names: Maxime
+    - family-names: Di Tommaso
+      given-names: Paolo
+    - family-names: Nahnsen
+      given-names: Sven
+  doi: 10.1038/s41587-020-0439-x
+  journal: nature biotechnology
+  start: 276
+  end: 278
+  title: "The nf-core framework for community-curated bioinformatics pipelines."
+  issue: 3
+  volume: 38
+  year: 2020
+  url: https://dx.doi.org/10.1038/s41587-020-0439-x
diff --git a/README.md b/README.md
index 45ee3b4f..095d129c 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,14 @@
 # ![nf-core/chipseq](docs/images/nf-core-chipseq_logo_light.png#gh-light-mode-only) ![nf-core/chipseq](docs/images/nf-core-chipseq_logo_dark.png#gh-dark-mode-only)
 
-[![GitHub Actions CI Status](https://github.com/nf-core/chipseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/chipseq/actions?query=workflow%3A%22nf-core+CI%22)
-[![GitHub Actions Linting Status](https://github.com/nf-core/chipseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/chipseq/actions?query=workflow%3A%22nf-core+linting%22)
-[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?logo=Amazon%20AWS)](https://nf-co.re/chipseq/results)
-[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8)](https://doi.org/10.5281/zenodo.XXXXXXX)
+[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/chipseq/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)
 
 [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/)
-[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?logo=anaconda)](https://docs.conda.io/en/latest/)
-[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?logo=docker)](https://www.docker.com/)
-[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg)](https://sylabs.io/docs/)
+[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
+[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)
+[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)
 [![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/chipseq)
 
-[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23chipseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/chipseq)
-[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?logo=twitter)](https://twitter.com/nf_core)
-[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?logo=youtube)](https://www.youtube.com/c/nf-core)
+[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23chipseq-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/chipseq)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)
 
 ## Introduction
 
@@ -25,7 +20,7 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
 
 <!-- TODO nf-core: Add full-sized test dataset and amend the paragraph below if applicable -->
 
-On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/chipseq/results).
+On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/chipseq/results).
 
 ## Pipeline summary
 
@@ -42,7 +37,7 @@ On release, automated continuous integration tests run the pipeline on a full-si
 
 3. Download the pipeline and test it on a minimal dataset with a single command:
 
-   ```console
+   ```bash
    nextflow run nf-core/chipseq -profile test,YOURPROFILE --outdir <OUTDIR>
    ```
 
@@ -57,7 +52,7 @@ On release, automated continuous integration tests run the pipeline on a full-si
 
    <!-- TODO nf-core: Update the example "typical command" below used to run the pipeline -->
 
-   ```console
+   ```bash
    nextflow run nf-core/chipseq --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
    ```
 
diff --git a/assets/email_template.txt b/assets/email_template.txt
index 6d35a697..e1b78526 100644
--- a/assets/email_template.txt
+++ b/assets/email_template.txt
@@ -6,7 +6,6 @@
                                         `._,._,'
   nf-core/chipseq v${version}
 ----------------------------------------------------
-
 Run Name: $runName
 
 <% if (success){
diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py
index 3652c63c..9a8b8962 100755
--- a/bin/check_samplesheet.py
+++ b/bin/check_samplesheet.py
@@ -11,7 +11,6 @@ import sys
 from collections import Counter
 from pathlib import Path
 
-
 logger = logging.getLogger()
 
 
@@ -79,13 +78,15 @@ class RowChecker:
 
     def _validate_sample(self, row):
         """Assert that the sample name exists and convert spaces to underscores."""
-        assert len(row[self._sample_col]) > 0, "Sample input is required."
+        if len(row[self._sample_col]) <= 0:
+            raise AssertionError("Sample input is required.")
         # Sanitize samples slightly.
         row[self._sample_col] = row[self._sample_col].replace(" ", "_")
 
     def _validate_first(self, row):
         """Assert that the first FASTQ entry is non-empty and has the right format."""
-        assert len(row[self._first_col]) > 0, "At least the first FASTQ file is required."
+        if len(row[self._first_col]) <= 0:
+            raise AssertionError("At least the first FASTQ file is required.")
         self._validate_fastq_format(row[self._first_col])
 
     def _validate_second(self, row):
@@ -97,36 +98,34 @@ class RowChecker:
         """Assert that read pairs have the same file extension. Report pair status."""
         if row[self._first_col] and row[self._second_col]:
             row[self._single_col] = False
-            assert (
-                Path(row[self._first_col]).suffixes[-2:] == Path(row[self._second_col]).suffixes[-2:]
-            ), "FASTQ pairs must have the same file extensions."
+            if Path(row[self._first_col]).suffixes[-2:] != Path(row[self._second_col]).suffixes[-2:]:
+                raise AssertionError("FASTQ pairs must have the same file extensions.")
         else:
             row[self._single_col] = True
 
     def _validate_fastq_format(self, filename):
         """Assert that a given filename has one of the expected FASTQ extensions."""
-        assert any(filename.endswith(extension) for extension in self.VALID_FORMATS), (
-            f"The FASTQ file has an unrecognized extension: {filename}\n"
-            f"It should be one of: {', '.join(self.VALID_FORMATS)}"
-        )
+        if not any(filename.endswith(extension) for extension in self.VALID_FORMATS):
+            raise AssertionError(
+                f"The FASTQ file has an unrecognized extension: {filename}\n"
+                f"It should be one of: {', '.join(self.VALID_FORMATS)}"
+            )
 
     def validate_unique_samples(self):
         """
         Assert that the combination of sample name and FASTQ filename is unique.
 
-        In addition to the validation, also rename the sample if more than one sample,
-        FASTQ file combination exists.
+        In addition to the validation, also rename all samples to have a suffix of _T{n}, where n is the
+        number of times the same sample exists, but with different FASTQ files, e.g., multiple runs per experiment.
 
         """
-        assert len(self._seen) == len(self.modified), "The pair of sample name and FASTQ must be unique."
-        if len({pair[0] for pair in self._seen}) < len(self._seen):
-            counts = Counter(pair[0] for pair in self._seen)
-            seen = Counter()
-            for row in self.modified:
-                sample = row[self._sample_col]
-                seen[sample] += 1
-                if counts[sample] > 1:
-                    row[self._sample_col] = f"{sample}_T{seen[sample]}"
+        if len(self._seen) != len(self.modified):
+            raise AssertionError("The pair of sample name and FASTQ must be unique.")
+        seen = Counter()
+        for row in self.modified:
+            sample = row[self._sample_col]
+            seen[sample] += 1
+            row[self._sample_col] = f"{sample}_T{seen[sample]}"
 
 
 def read_head(handle, num_lines=10):
diff --git a/conf/base.config b/conf/base.config
index 8231c479..daf12c7c 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -26,6 +26,11 @@ process {
     //        adding in your local modules too.
     // TODO nf-core: Customise requirements for specific processes.
     // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
+    withLabel:process_single {
+        cpus   = { check_max( 1                  , 'cpus'    ) }
+        memory = { check_max( 6.GB * task.attempt, 'memory'  ) }
+        time   = { check_max( 4.h  * task.attempt, 'time'    ) }
+    }
     withLabel:process_low {
         cpus   = { check_max( 2     * task.attempt, 'cpus'    ) }
         memory = { check_max( 12.GB * task.attempt, 'memory'  ) }
diff --git a/docs/images/nf-core-chipseq_logo_dark.png b/docs/images/nf-core-chipseq_logo_dark.png
index cf22a2e865ba247de7727c84ee2e990aa932803e..9f2b30100fd7ed5870367833ae6567d1dd97f407 100644
GIT binary patch
delta 23
fcmcb8kmd0~mJLw}91IK!44y8IA)BKT8V>*fZT<+W

delta 14
WcmaESkmdG4mJLw}o0Ae64*&o;s|RTS

diff --git a/docs/usage.md b/docs/usage.md
index e6dc9daa..997dfd2c 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -12,7 +12,7 @@
 
 You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below.
 
-```console
+```bash
 --input '[path to samplesheet file]'
 ```
 
@@ -56,7 +56,7 @@ An [example samplesheet](../assets/samplesheet.csv) has been provided with the p
 
 The typical command for running the pipeline is as follows:
 
-```console
+```bash
 nextflow run nf-core/chipseq --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile docker
 ```
 
@@ -64,9 +64,9 @@ This will launch the pipeline with the `docker` configuration profile. See below
 
 Note that the pipeline will create the following files in your working directory:
 
-```console
+```bash
 work                # Directory containing the nextflow working files
-<OUTIDR>            # Finished results in specified location (defined with --outdir)
+<OUTDIR>            # Finished results in specified location (defined with --outdir)
 .nextflow_log       # Log file from Nextflow
 # Other nextflow hidden files, eg. history of pipeline runs and old logs.
 ```
@@ -75,7 +75,7 @@ work                # Directory containing the nextflow working files
 
 When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
 
-```console
+```bash
 nextflow pull nf-core/chipseq
 ```
 
@@ -251,6 +251,6 @@ Some HPC setups also allow you to run nextflow within a cluster job submitted yo
 In some cases, the Nextflow Java virtual machines can start to request a large amount of memory.
 We recommend adding the following line to your environment to limit this (typically in `~/.bashrc` or `~./bash_profile`):
 
-```console
+```bash
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
diff --git a/lib/WorkflowChipseq.groovy b/lib/WorkflowChipseq.groovy
index 547194f7..a5952af0 100755
--- a/lib/WorkflowChipseq.groovy
+++ b/lib/WorkflowChipseq.groovy
@@ -10,6 +10,7 @@ class WorkflowChipseq {
     public static void initialise(params, log) {
         genomeExistsError(params, log)
 
+
         if (!params.fasta) {
             log.error "Genome fasta file not specified with e.g. '--fasta genome.fa' or via a detectable config file."
             System.exit(1)
@@ -41,9 +42,9 @@
         yaml_file_text        += "data: |\n"
         yaml_file_text        += "${summary_section}"
         return yaml_file_text
-    }
-
-    //
+    }
+
+    //
     // Exit pipeline if incorrect --genome key provided
     //
     private static void genomeExistsError(params, log) {
diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy
index 0bf58e43..904824ae 100755
--- a/lib/WorkflowMain.groovy
+++ b/lib/WorkflowMain.groovy
@@ -59,6 +59,7 @@ class WorkflowMain {
         }
 
         // Print parameter summary log to screen
+
         log.info paramsSummaryLog(workflow, params, log)
 
         // Check that a -profile or Nextflow config has been provided to run the pipeline
@@ -78,17 +79,15 @@ class WorkflowMain {
             System.exit(1)
         }
     }
-
     //
     // Get attribute from genome config file e.g. fasta
     //
-    public static String getGenomeAttribute(params, attribute) {
-        def val = ''
+    public static Object getGenomeAttribute(params, attribute) {
         if (params.genomes && params.genome && params.genomes.containsKey(params.genome)) {
             if (params.genomes[ params.genome ].containsKey(attribute)) {
-                val = params.genomes[ params.genome ][ attribute ]
+                return params.genomes[ params.genome ][ attribute ]
             }
         }
-        return val
+        return null
     }
 }
diff --git a/main.nf b/main.nf
index cf50b118..d6adc851 100644
--- a/main.nf
+++ b/main.nf
@@ -4,7 +4,7 @@
     nf-core/chipseq
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Github : https://github.com/nf-core/chipseq
-    Website: https://nf-co.re/chipseq
+    Website: https://nf-co.re/chipseq
     Slack  : https://nfcore.slack.com/channels/chipseq
 ----------------------------------------------------------------------------------------
 */
diff --git a/modules.json b/modules.json
index 72584e8a..db838714 100644
--- a/modules.json
+++ b/modules.json
@@ -3,14 +3,20 @@
     "homePage": "https://github.com/nf-core/chipseq",
     "repos": {
         "nf-core/modules": {
-            "custom/dumpsoftwareversions": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
-            },
-            "fastqc": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
-            },
-            "multiqc": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
+            "git_url": "https://github.com/nf-core/modules.git",
+            "modules": {
+                "custom/dumpsoftwareversions": {
+                    "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d",
+                    "branch": "master"
+                },
+                "fastqc": {
+                    "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d",
+                    "branch": "master"
+                },
+                "multiqc": {
+                    "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d",
+                    "branch": "master"
+                }
             }
         }
     }
diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
index d1390392..787bdb7b 100644
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
+++ b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python
 
-import yaml
 import platform
 from textwrap import dedent
 
+import yaml
+
 
 def _make_versions_html(versions):
     html = [
@@ -58,11 +59,12 @@ versions_by_module = {}
 for process, process_versions in versions_by_process.items():
     module = process.split(":")[-1]
     try:
-        assert versions_by_module[module] == process_versions, (
-            "We assume that software versions are the same between all modules. "
-            "If you see this error-message it means you discovered an edge-case "
-            "and should open an issue in nf-core/tools. "
-        )
+        if versions_by_module[module] != process_versions:
+            raise AssertionError(
+                "We assume that software versions are the same between all modules. "
+                "If you see this error-message it means you discovered an edge-case "
+                "and should open an issue in nf-core/tools. "
+            )
     except KeyError:
         versions_by_module[module] = process_versions
 
diff --git a/nextflow.config b/nextflow.config
index 6c5ab6b9..08da317e 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -13,11 +13,11 @@ params {
     // Input options
     input                      = null
 
+
     // References
     genome                     = null
     igenomes_base              = 's3://ngi-igenomes/igenomes'
     igenomes_ignore            = false
-
     // MultiQC options
     multiqc_config             = null
     multiqc_title              = null
@@ -37,6 +37,7 @@ params {
     schema_ignore_params       = 'genomes'
     enable_conda               = false
 
+
     // Config options
     custom_config_version      = 'master'
     custom_config_base         = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
@@ -45,6 +46,7 @@ params {
     config_profile_url         = null
     config_profile_name        = null
 
+
     // Max resource options
     // Defaults only, expecting to be overwritten
     max_memory                 = '128.GB'
@@ -72,6 +74,7 @@ try {
 // }
 
 
+
 profiles {
     debug { process.beforeScript = 'echo $HOSTNAME' }
     conda {
@@ -82,6 +85,15 @@ profiles {
         shifter.enabled        = false
         charliecloud.enabled   = false
     }
+    mamba {
+        params.enable_conda    = true
+        conda.useMamba         = true
+        docker.enabled         = false
+        singularity.enabled    = false
+        podman.enabled         = false
+        shifter.enabled        = false
+        charliecloud.enabled   = false
+    }
     docker {
         docker.enabled         = true
         docker.userEmulation   = true
@@ -119,10 +131,16 @@ profiles {
         podman.enabled         = false
         shifter.enabled        = false
     }
+    gitpod {
+        executor.name          = 'local'
+        executor.cpus          = 16
+        executor.memory        = 60.GB
+    }
     test      { includeConfig 'conf/test.config'      }
     test_full { includeConfig 'conf/test_full.config' }
 }
 
+
 // Load igenomes.config if required
 if (!params.igenomes_ignore) {
     includeConfig 'conf/igenomes.config'
@@ -130,6 +148,7 @@ if (!params.igenomes_ignore) {
     params.genomes = [:]
 }
 
+
 // Export these variables to prevent local Python/R libraries from conflicting with those in the container
 // The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container.
 // See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable.
@@ -169,7 +188,7 @@ manifest {
     description     = 'ChIP-seq peak-calling and differential analysis pipeline.'
     mainScript      = 'main.nf'
     nextflowVersion = '!>=21.10.3'
-    version         = '1.3.0dev'
+    version         = '2.0.0'
 }
 
 // Load modules.config for DSL2 module specific options
-- 
GitLab