diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 0000000000000000000000000000000000000000..ea27a5843a0ff5f97ef49908689f595c42e17a1f
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,27 @@
+{
+    "name": "nfcore",
+    "image": "nfcore/gitpod:latest",
+    "remoteUser": "gitpod",
+
+    // Configure tool-specific properties.
+    "customizations": {
+        // Configure properties specific to VS Code.
+        "vscode": {
+            // Set *default* container specific settings.json values on container create.
+            "settings": {
+                "python.defaultInterpreterPath": "/opt/conda/bin/python",
+                "python.linting.enabled": true,
+                "python.linting.pylintEnabled": true,
+                "python.formatting.autopep8Path": "/opt/conda/bin/autopep8",
+                "python.formatting.yapfPath": "/opt/conda/bin/yapf",
+                "python.linting.flake8Path": "/opt/conda/bin/flake8",
+                "python.linting.pycodestylePath": "/opt/conda/bin/pycodestyle",
+                "python.linting.pydocstylePath": "/opt/conda/bin/pydocstyle",
+                "python.linting.pylintPath": "/opt/conda/bin/pylint"
+            },
+
+            // Add the IDs of extensions you want installed when the container is created.
+            "extensions": ["ms-python.python", "ms-python.vscode-pylance", "nf-core.nf-core-extensionpack"]
+        }
+    }
+}
diff --git a/.editorconfig b/.editorconfig
index 43c7138733277a83ffc7e788e0283df17f6ccf71..75c2fe61f4886762396871469bba8684f282add2 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -8,7 +8,7 @@ trim_trailing_whitespace = true
 indent_size = 4
 indent_style = space
 
-[*.{md,yml,yaml,html,css,scss,js}]
+[*.{md,yml,yaml,html,css,scss,js,cff}]
 indent_size = 2
 
 # These files are edited and tested upstream in nf-core/modules
diff --git a/.gitattributes b/.gitattributes
index 050bb1203530c3ee0b610fe2a05aed5059a3bf19..7a2dabc29354bc42709be9241603cff642ce5c27 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,4 @@
 *.config linguist-language=nextflow
+*.nf.test linguist-language=nextflow
 modules/nf-core/** linguist-generated
 subworkflows/nf-core/** linguist-generated
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 21343c4d8ef64758fbfe436553442d6a857c7024..3b558e400aaa1d5349ba9a3ca945664302ca189f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -101,3 +101,19 @@ If you are using a new feature from core Nextflow, you may bump the minimum requ
 ### Images and figures
 
 For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines).
+
+## GitHub Codespaces
+
+This repo includes a devcontainer configuration which will create a GitHub Codespaces environment for Nextflow development! This is an online developer environment that runs in your browser, complete with VSCode and a terminal.
+
+To get started:
+
+- Open the repo in [Codespaces](https://github.com/nf-core/hic/codespaces)
+- Tools installed
+  - nf-core
+  - Nextflow
+
+Devcontainer specs:
+
+- [DevContainer config](.devcontainer/devcontainer.json)
+- [Dockerfile](.devcontainer/Dockerfile)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 98a7950ce9bef303424ad5ce1c10c1c6ed135f1d..e405327c328cc2a94ebadf33e9ac4cad766f7461 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -42,7 +42,7 @@ body:
     attributes:
       label: System information
       description: |
-        * Nextflow version _(eg. 21.10.3)_
+        * Nextflow version _(eg. 22.10.1)_
         * Hardware _(eg. HPC, Desktop, Cloud)_
         * Executor _(eg. slurm, local, awsbatch)_
         * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter or Charliecloud)_
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 56233aa3631298e9a762b06c6f868ee171b41773..c67458d182e840707cbccc6914ff1e3d95331b0e 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -15,8 +15,8 @@ Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/hic/
 
 - [ ] This comment contains a description of changes (with reason).
 - [ ] If you've fixed a bug or added code that should be tested, add tests!
-  - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
-  - [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
+- [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
+- [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
 - [ ] Make sure your code lints (`nf-core lint`).
 - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`).
 - [ ] Usage Documentation in `docs/usage.md` is updated.
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index 82c1a4ecd1bac8b5410b2825e230f9f3924594d3..ad7e2ddfdff0e494d19ce8baa49db8123d6728a6 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -15,9 +15,6 @@ jobs:
     steps:
       - name: Launch workflow via tower
         uses: nf-core/tower-action@v3
-        # TODO nf-core: You can customise AWS full pipeline tests as required
-        # Add full size test data (but still relatively small datasets for few samples)
-        # on the `test_full.config` test runs with only one set of parameters
         with:
           workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
           access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
@@ -28,3 +25,7 @@ jobs:
               "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-${{ github.sha }}"
             }
           profiles: test_full,aws_tower
+      - uses: actions/upload-artifact@v3
+        with:
+          name: Tower debug log file
+          path: tower_action_*.log
diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml
index 5cd0714273dca87007b7c5ea50df27d05fe3606d..3f9b365ffe5729722f94316715fe5e654860c152 100644
--- a/.github/workflows/awstest.yml
+++ b/.github/workflows/awstest.yml
@@ -23,3 +23,7 @@ jobs:
               "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-test-${{ github.sha }}"
             }
           profiles: test,aws_tower
+      - uses: actions/upload-artifact@v3
+        with:
+          name: Tower debug log file
+          path: tower_action_*.log
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0dac5c2786f66f7e503085c858823a4833afb92a..5531e307166fc8f4d5717ba3efc646693560c67b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,10 @@ on:
 
 env:
   NXF_ANSI_LOG: false
-  CAPSULE_LOG: none
+
+concurrency:
+  group: "${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}"
+  cancel-in-progress: true
 
 jobs:
   test:
@@ -20,27 +23,17 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        # Nextflow versions
-        include:
-          # Test pipeline minimum Nextflow version
-          - NXF_VER: "21.10.3"
-            NXF_EDGE: ""
-          # Test latest edge release of Nextflow
-          - NXF_VER: ""
-            NXF_EDGE: "1"
+        NXF_VER:
+          - "22.10.1"
+          - "latest-everything"
     steps:
       - name: Check out pipeline code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Install Nextflow
-        env:
-          NXF_VER: ${{ matrix.NXF_VER }}
-          # Uncomment only if the edge release is more recent than the latest stable release
-          # See https://github.com/nextflow-io/nextflow/issues/2467
-          # NXF_EDGE: ${{ matrix.NXF_EDGE }}
-        run: |
-          wget -qO- get.nextflow.io | bash
-          sudo mv nextflow /usr/local/bin/
+        uses: nf-core/setup-nextflow@v1
+        with:
+          version: "${{ matrix.NXF_VER }}"
 
       - name: Run pipeline with test data
         run: |
diff --git a/.github/workflows/fix-linting.yml b/.github/workflows/fix-linting.yml
index 502eff95019714bf6b719046f0127d1e998f6b8a..66550055794ba4b8fa3458edcf8a3e33de57dc30 100644
--- a/.github/workflows/fix-linting.yml
+++ b/.github/workflows/fix-linting.yml
@@ -24,7 +24,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.nf_core_bot_auth_token }}
 
-      - uses: actions/setup-node@v2
+      - uses: actions/setup-node@v3
 
       - name: Install Prettier
         run: npm install -g prettier @prettier/plugin-php
@@ -34,9 +34,9 @@ jobs:
         id: prettier_status
         run: |
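+          # ::set-output is deprecated by GitHub Actions; step outputs are now written as key=value lines to $GITHUB_OUTPUT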
           if prettier --check ${GITHUB_WORKSPACE}; then
-            echo "::set-output name=result::pass"
+            echo "result=pass" >> $GITHUB_OUTPUT
           else
-            echo "::set-output name=result::fail"
+            echo "result=fail" >> $GITHUB_OUTPUT
           fi
 
       - name: Run 'prettier --write'
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 77358dee77fafceacab736de94ae79f2258a523f..858d622efc884fba438eb48c80d4122443dfa3a0 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -4,6 +4,8 @@ name: nf-core linting
 # that the code meets the nf-core guidelines.
 on:
   push:
+    branches:
+      - dev
   pull_request:
   release:
     types: [published]
@@ -12,9 +14,9 @@ jobs:
   EditorConfig:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
-      - uses: actions/setup-node@v2
+      - uses: actions/setup-node@v3
 
       - name: Install editorconfig-checker
         run: npm install -g editorconfig-checker
@@ -25,9 +27,9 @@ jobs:
   Prettier:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
-      - uses: actions/setup-node@v2
+      - uses: actions/setup-node@v3
 
       - name: Install Prettier
         run: npm install -g prettier
@@ -35,22 +37,48 @@ jobs:
       - name: Run Prettier --check
         run: prettier --check ${GITHUB_WORKSPACE}
 
+  PythonBlack:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Check code lints with Black
+        uses: psf/black@stable
+
+      # If the above check failed, post a comment on the PR explaining the failure
+      - name: Post PR comment
+        if: failure()
+        uses: mshick/add-pr-comment@v1
+        with:
+          message: |
+            ## Python linting (`black`) is failing
+
+            To keep the code consistent with lots of contributors, we run automated code consistency checks.
+            To fix this CI test, please run:
+
+            * Install [`black`](https://black.readthedocs.io/en/stable/): `pip install black`
+            * Fix formatting errors in your pipeline: `black .`
+
+            Once you push these changes the test should pass, and you can hide this comment :+1:
+
+            We highly recommend setting up Black in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+
+            Thanks again for your contribution!
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          allow-repeats: false
+
   nf-core:
     runs-on: ubuntu-latest
     steps:
       - name: Check out pipeline code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Install Nextflow
-        env:
-          CAPSULE_LOG: none
-        run: |
-          wget -qO- get.nextflow.io | bash
-          sudo mv nextflow /usr/local/bin/
+        uses: nf-core/setup-nextflow@v1
 
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: "3.6"
+          python-version: "3.7"
           architecture: "x64"
 
       - name: Install dependencies
@@ -71,7 +99,7 @@ jobs:
 
       - name: Upload linting log file artifact
         if: ${{ always() }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: linting-logs
           path: |
diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml
index 04758f61e368b37a7651aff015e51a9c31162542..0bbcd30f23effefe9ac5a7a49cc16f43140c20a7 100644
--- a/.github/workflows/linting_comment.yml
+++ b/.github/workflows/linting_comment.yml
@@ -18,7 +18,7 @@ jobs:
 
       - name: Get PR number
         id: pr_number
-        run: echo "::set-output name=pr_number::$(cat linting-logs/PR_number.txt)"
+        run: echo "pr_number=$(cat linting-logs/PR_number.txt)" >> $GITHUB_OUTPUT
 
       - name: Post PR comment
         uses: marocchino/sticky-pull-request-comment@v2
diff --git a/.prettierignore b/.prettierignore
index d0e7ae58916d32ec5fc706ae0eda9403db44f290..437d763d0c2c8fdeb5f8f7e1e04d54771d9b46d6 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1,4 +1,6 @@
 email_template.html
+adaptivecard.json
+slackreport.json
 .nextflow*
 work/
 data/
@@ -7,3 +9,4 @@ results/
 testing/
 testing*
 *.pyc
+bin/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b66d2bc633650b8e74a028f5e760a79c83627dff..2ae98e78aebd7bdcb04e9145f37532425f5bdaf1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,11 +3,17 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## v1.4.0dev
+## v1.4.0 - 2023-01-09
 
 ### `Added`
 
 - DSL2 version of nf-core-hic pipeline
+- Add full test dataset (#80)
+- Replace local modules with the cooler nf-core module
+
+### `Fixed`
+
+- Fix error in the Arima preset (#127)
 
 ## v1.3.1 - 2021-09-25
 
diff --git a/README.md b/README.md
index 48a23b070e337e18fd7ceb230d4aa802f7bdd4e5..2bb1cdac04a12081ea0bd675696f2103e5c6d096 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,14 @@
 # ![nf-core/hic](docs/images/nf-core-hic_logo_light.png#gh-light-mode-only) ![nf-core/hic](docs/images/nf-core-hic_logo_dark.png#gh-dark-mode-only)
 
-[![GitHub Actions CI Status](https://github.com/nf-core/hic/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/hic/actions?query=workflow%3A%22nf-core+CI%22)
-[![GitHub Actions Linting Status](https://github.com/nf-core/hic/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/hic/actions?query=workflow%3A%22nf-core+linting%22)
-[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?logo=Amazon%20AWS)](https://nf-co.re/hic/results)
-[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8)](https://doi.org/10.5281/zenodo.XXXXXXX)
-
-[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg)](https://www.nextflow.io/)
-[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?logo=anaconda)](https://docs.conda.io/en/latest/)
-[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?logo=docker)](https://www.docker.com/)
-[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg)](https://sylabs.io/docs/)
+[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/hic/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.2669512-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.2669512)
+
+[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)
+[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
+[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)
+[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)
 [![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/hic)
 
-[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23hic-4A154B?logo=slack)](https://nfcore.slack.com/channels/hic)
-[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?logo=twitter)](https://twitter.com/nf_core)
-[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?logo=youtube)](https://www.youtube.com/c/nf-core)
+[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23hic-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/hic)[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)
 
 ## Introduction
 
@@ -21,7 +16,7 @@
 
 The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!
 
-On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/hic/results).
+On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/hic/results).
 
 ## Pipeline summary
 
@@ -43,13 +38,13 @@ On release, automated continuous integration tests run the pipeline on a full-si
 
 ## Quick Start
 
-1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.10.3`)
+1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)
 
 2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.
 
 3. Download the pipeline and test it on a minimal dataset with a single command:
 
-   ```console
+   ```bash
    nextflow run nf-core/hic -profile test,YOURPROFILE --outdir <OUTDIR>
    ```
 
@@ -62,7 +57,7 @@ On release, automated continuous integration tests run the pipeline on a full-si
 
 4. Start running your own analysis!
 
-   ```console
+   ```bash
    nextflow run nf-core/hic --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
    ```
 
diff --git a/assets/adaptivecard.json b/assets/adaptivecard.json
new file mode 100644
index 0000000000000000000000000000000000000000..79f9dbe9924bf336c47a8ad509ae5bf68e430640
--- /dev/null
+++ b/assets/adaptivecard.json
@@ -0,0 +1,67 @@
+{
+    "type": "message",
+    "attachments": [
+        {
+            "contentType": "application/vnd.microsoft.card.adaptive",
+            "contentUrl": null,
+            "content": {
+                "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
+                "msteams": {
+                    "width": "Full"
+                },
+                "type": "AdaptiveCard",
+                "version": "1.2",
+                "body": [
+                    {
+                        "type": "TextBlock",
+                        "size": "Large",
+                        "weight": "Bolder",
+                        "color": "<% if (success) { %>Good<% } else { %>Attention<%} %>",
+                        "text": "nf-core/hic v${version} - ${runName}",
+                        "wrap": true
+                    },
+                    {
+                        "type": "TextBlock",
+                        "spacing": "None",
+                        "text": "Completed at ${dateComplete} (duration: ${duration})",
+                        "isSubtle": true,
+                        "wrap": true
+                    },
+                    {
+                        "type": "TextBlock",
+                        "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors. The full error message was: ${errorReport}.<% } %>",
+                        "wrap": true
+                    },
+                    {
+                        "type": "TextBlock",
+                        "text": "The command used to launch the workflow was as follows:",
+                        "wrap": true
+                    },
+                    {
+                        "type": "TextBlock",
+                        "text": "${commandLine}",
+                        "isSubtle": true,
+                        "wrap": true
+                    }
+                ],
+                "actions": [
+                    {
+                        "type": "Action.ShowCard",
+                        "title": "Pipeline Configuration",
+                        "card": {
+                            "type": "AdaptiveCard",
+                            "\$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
+                            "body": [
+                                {
+                                    "type": "FactSet",
+                                    "facts": [<% out << summary.collect{ k,v -> "{\"title\": \"$k\", \"value\" : \"$v\"}"}.join(",\n") %>
+                                    ]
+                                }
+                            ]
+                        }
+                    }
+                ]
+            }
+        }
+    ]
+}
diff --git a/assets/email_template.txt b/assets/email_template.txt
index a951c5e7f965fa5829707fc84f4351495995190f..6905d6fc70619b99a53a9c51670b026095e2965f 100644
--- a/assets/email_template.txt
+++ b/assets/email_template.txt
@@ -6,7 +6,6 @@
                                         `._,._,'
   nf-core/hic v${version}
 ----------------------------------------------------
-
 Run Name: $runName
 
 <% if (success){
diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2f0a30855b2ab842a1f5b16cb9f5ce8437f96dbd
--- /dev/null
+++ b/assets/methods_description_template.yml
@@ -0,0 +1,26 @@
+id: "nf-core-hic-methods-description"
+description: "Suggested text and references to use when describing pipeline usage within the methods section of a publication."
+section_name: "nf-core/hic Methods Description"
+section_href: "https://github.com/nf-core/hic"
+plot_type: "html"
+## nf-core: Update the HTML below to your preferred methods description, e.g. add publication citation for this pipeline
+## You can inject any metadata in the Nextflow '${workflow}' object
+data: |
+  <h4>Methods</h4>
+  <p>Data was processed using nf-core/hic v${workflow.manifest.version} ${doi_text} of the nf-core collection of workflows (<a href="https://doi.org/10.1038/s41587-020-0439-x">Ewels <em>et al.</em>, 2020</a>).</p>
+  <p>The pipeline was executed with Nextflow v${workflow.nextflow.version} (<a href="https://doi.org/10.1038/nbt.3820">Di Tommaso <em>et al.</em>, 2017</a>) with the following command:</p>
+  <pre><code>${workflow.commandLine}</code></pre>
+  <h4>References</h4>
+  <ul>
+    <li>Servant, N., Ewels, P. A., Peltzer, A., Garcia, M. U. (2021) nf-core/hic. Zenodo. https://doi.org/10.5281/zenodo.2669512</li>
+    <li>Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. <a href="https://doi.org/10.1038/nbt.3820">https://doi.org/10.1038/nbt.3820</a></li>
+    <li>Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. <a href="https://doi.org/10.1038/s41587-020-0439-x">https://doi.org/10.1038/s41587-020-0439-x</a></li>
+  </ul>
+  <div class="alert alert-info">
+    <h5>Notes:</h5>
+    <ul>
+      ${nodoi_text}
+      <li>The command above does not include parameters contained in any configs or profiles that may have been used. Ensure the config file is also uploaded with your publication!</li>
+      <li>You should also cite all software used within this run. Check the "Software Versions" of this report to get version information.</li>
+    </ul>
+  </div>
diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml
index e371ccf83ba8d11c96891ddae6c55e120e370667..b2cf07d846fa86aa4100cd1ac05fd72b3827dde4 100644
--- a/assets/multiqc_config.yml
+++ b/assets/multiqc_config.yml
@@ -3,9 +3,11 @@ report_comment: >
   analysis pipeline. For information about how to interpret these results, please see the
   <a href="https://nf-co.re/hic" target="_blank">documentation</a>.
 report_section_order:
-  software_versions:
+  "nf-core-hic-methods-description":
     order: -1000
-  "nf-core-hic-summary":
+  software_versions:
     order: -1001
+  "nf-core-hic-summary":
+    order: -1002
 
 export_plots: true
diff --git a/assets/slackreport.json b/assets/slackreport.json
new file mode 100644
index 0000000000000000000000000000000000000000..043d02f27570da8e53dd7d3dd6d0a640cfa4636d
--- /dev/null
+++ b/assets/slackreport.json
@@ -0,0 +1,34 @@
+{
+    "attachments": [
+        {
+            "fallback": "Plain-text summary of the attachment.",
+            "color": "<% if (success) { %>good<% } else { %>danger<%} %>",
+            "author_name": "sanger-tol/readmapping v${version} - ${runName}",
+            "author_icon": "https://www.nextflow.io/docs/latest/_static/favicon.ico",
+            "text": "<% if (success) { %>Pipeline completed successfully!<% } else { %>Pipeline completed with errors<% } %>",
+            "fields": [
+                {
+                    "title": "Command used to launch the workflow",
+                    "value": "```${commandLine}```",
+                    "short": false
+                }
+                <%
+                    if (!success) { %>
+                    ,
+                    {
+                        "title": "Full error message",
+                        "value": "```${errorReport}```",
+                        "short": false
+                    },
+                    {
+                        "title": "Pipeline configuration",
+                        "value": "<% out << summary.collect{ k,v -> k == "hook_url" ? "_${k}_: (_hidden_)" : ( ( v.class.toString().contains('Path') || ( v.class.toString().contains('String') && v.contains('/') ) ) ? "_${k}_: `${v}`" : (v.class.toString().contains('DateTime') ? ("_${k}_: " + v.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM))) : "_${k}_: ${v}") ) }.join(",\n") %>",
+                        "short": false
+                    }
+                    <% }
+                %>
+            ],
+            "footer": "Completed at <% out << dateComplete.format(java.time.format.DateTimeFormatter.ofLocalizedDateTime(java.time.format.FormatStyle.MEDIUM)) %> (duration: ${duration})"
+        }
+    ]
+}
diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py
index 6be15bd25d60d6901b2ff4154033e6271d9f3869..c498ef45ee96e33a8d4f81b0be948028c712db61 100755
--- a/bin/check_samplesheet.py
+++ b/bin/check_samplesheet.py
@@ -10,7 +10,6 @@ import sys
 from collections import Counter
 from pathlib import Path
 
-
 logger = logging.getLogger()
 
 
@@ -78,13 +77,15 @@ class RowChecker:
 
     def _validate_sample(self, row):
         """Assert that the sample name exists and convert spaces to underscores."""
-        assert len(row[self._sample_col]) > 0, "Sample input is required."
+        if len(row[self._sample_col]) <= 0:
+            raise AssertionError("Sample input is required.")
         # Sanitize samples slightly.
         row[self._sample_col] = row[self._sample_col].replace(" ", "_")
 
     def _validate_first(self, row):
         """Assert that the first FASTQ entry is non-empty and has the right format."""
-        assert len(row[self._first_col]) > 0, "At least the first FASTQ file is required."
+        if len(row[self._first_col]) <= 0:
+            raise AssertionError("At least the first FASTQ file is required.")
         self._validate_fastq_format(row[self._first_col])
 
     def _validate_second(self, row):
@@ -96,36 +97,36 @@ class RowChecker:
         """Assert that read pairs have the same file extension. Report pair status."""
         if row[self._first_col] and row[self._second_col]:
             row[self._single_col] = False
-            assert (
-                Path(row[self._first_col]).suffixes[-2:] == Path(row[self._second_col]).suffixes[-2:]
-            ), "FASTQ pairs must have the same file extensions."
+            first_col_suffix = Path(row[self._first_col]).suffixes[-2:]
+            second_col_suffix = Path(row[self._second_col]).suffixes[-2:]
+            if first_col_suffix != second_col_suffix:
+                raise AssertionError("FASTQ pairs must have the same file extensions.")
         else:
             row[self._single_col] = True
 
     def _validate_fastq_format(self, filename):
         """Assert that a given filename has one of the expected FASTQ extensions."""
-        assert any(filename.endswith(extension) for extension in self.VALID_FORMATS), (
-            f"The FASTQ file has an unrecognized extension: {filename}\n"
-            f"It should be one of: {', '.join(self.VALID_FORMATS)}"
-        )
+        if not any(filename.endswith(extension) for extension in self.VALID_FORMATS):
+            raise AssertionError(
+                f"The FASTQ file has an unrecognized extension: {filename}\n"
+                f"It should be one of: {', '.join(self.VALID_FORMATS)}"
+            )
 
     def validate_unique_samples(self):
         """
         Assert that the combination of sample name and FASTQ filename is unique.
 
-        In addition to the validation, also rename the sample if more than one sample,
-        FASTQ file combination exists.
+        In addition to the validation, also rename all samples to have a suffix of _T{n}, where n is the
+        number of times the same sample exists but with different FASTQ files, e.g., multiple runs per experiment.
 
         """
-        assert len(self._seen) == len(self.modified), "The pair of sample name and FASTQ must be unique."
-        if len({pair[0] for pair in self._seen}) < len(self._seen):
-            counts = Counter(pair[0] for pair in self._seen)
-            seen = Counter()
-            for row in self.modified:
-                sample = row[self._sample_col]
-                seen[sample] += 1
-                #if counts[sample] > 1:
-                #    row[self._sample_col] = f"{sample}_T{seen[sample]}"
+        if len(self._seen) != len(self.modified):
+            raise AssertionError("The pair of sample name and FASTQ must be unique.")
+        seen = Counter()
+        for row in self.modified:
+            sample = row[self._sample_col]
+            seen[sample] += 1
+            ##row[self._sample_col] = f"{sample}_T{seen[sample]}"
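+            ## NB: the template's automatic _T{n} renaming is left commented out in this pipeline, so repeated
+            ## samples with different FASTQ files keep their original sample names.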
 
 
 def read_head(handle, num_lines=10):
@@ -157,7 +158,7 @@ def sniff_format(handle):
     handle.seek(0)
     sniffer = csv.Sniffer()
     if not sniffer.has_header(peek):
-        logger.critical(f"The given sample sheet does not appear to contain a header.")
+        logger.critical("The given sample sheet does not appear to contain a header.")
         sys.exit(1)
     dialect = sniffer.sniff(peek)
     return dialect
@@ -195,7 +196,8 @@ def check_samplesheet(file_in, file_out):
         reader = csv.DictReader(in_handle, dialect=sniff_format(in_handle))
         # Validate the existence of the expected header columns.
         if not required_columns.issubset(reader.fieldnames):
-            logger.critical(f"The sample sheet **must** contain the column headers: {', '.join(required_columns)}.")
+            req_cols = ", ".join(required_columns)
+            logger.critical(f"The sample sheet **must** contain these column headers: {req_cols}.")
             sys.exit(1)
         # Validate each row.
         checker = RowChecker()
diff --git a/bin/digest_genome.py b/bin/digest_genome.py
index 2c29a49e1cf174f12142f78627fd799b83da2788..9f05b45b828e81abe35c146cc4b598334cf43916 100755
--- a/bin/digest_genome.py
+++ b/bin/digest_genome.py
@@ -18,15 +18,11 @@ import os
 import sys
 import numpy as np
 
-RE_cutsite = {
-    "mboi": ["^GATC"],
-    "dpnii": ["^GATC"],
-    "bglii": ["A^GATCT"],
-    "hindiii": ["A^AGCTT"]}
+RE_cutsite = {"mboi": ["^GATC"], "dpnii": ["^GATC"], "bglii": ["A^GATCT"], "hindiii": ["A^AGCTT"]}
 
 
 def find_re_sites(filename, sequences, offset):
-    with open(filename, 'r') as infile:
+    with open(filename, "r") as infile:
         chr_id = None
         big_str = ""
         indices = []
@@ -40,13 +36,12 @@ def find_re_sites(filename, sequences, offset):
                 # If this is not the first chromosome, find the indices and append
                 # them to the list
                 if chr_id is not None:
-                     for rs in range(len(sequences)):
-                         pattern = "(?={})".format(sequences[rs].lower())
-                         indices += [m.start() + offset[rs]\
-                         for m in re.finditer(pattern, big_str)]
-                     indices.sort()
-                     all_indices.append(indices)
-                     indices = []
+                    for rs in range(len(sequences)):
+                        pattern = "(?={})".format(sequences[rs].lower())
+                        indices += [m.start() + offset[rs] for m in re.finditer(pattern, big_str)]
+                    indices.sort()
+                    all_indices.append(indices)
+                    indices = []
 
                 # This is a new chromosome. Empty the sequence string, and add the
                 # correct chrom id
@@ -63,11 +58,10 @@ def find_re_sites(filename, sequences, offset):
         # Add the indices for the last chromosome
         for rs in range(len(sequences)):
             pattern = "(?={})".format(sequences[rs].lower())
-            indices += [m.start() + offset[rs]
-                        for m in re.finditer(pattern, big_str)]
+            indices += [m.start() + offset[rs] for m in re.finditer(pattern, big_str)]
         indices.sort()
         all_indices.append(indices)
-    
+
     return contig_names, all_indices
 
 
@@ -75,7 +69,7 @@ def find_chromsomose_lengths(reference_filename):
     chromosome_lengths = []
     chromosome_names = []
     length = None
-    with open(reference_filename, 'r') as infile:
+    with open(reference_filename, "r") as infile:
         for line in infile:
             if line.startswith(">"):
                 chromosome_names.append(line[1:].strip())
@@ -89,11 +83,11 @@ def find_chromsomose_lengths(reference_filename):
 
 
 def replaceN(cs):
-    npos = int(cs.find('N'))
+    npos = int(cs.find("N"))
     cseql = []
     if npos != -1:
-        for nuc in ["A","C","G","T"]:
-            tmp = cs.replace('N', nuc, 1)
+        for nuc in ["A", "C", "G", "T"]:
+            tmp = cs.replace("N", nuc, 1)
             tmpl = replaceN(tmp)
             if type(tmpl) == list:
                 cseql = cseql + tmpl
@@ -106,50 +100,59 @@ def replaceN(cs):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('fastafile')
-    parser.add_argument('-r', '--restriction_sites',
-                        dest='res_sites',
-                        nargs='+',
-                        help=("The cutting position has to be specified using "
-                              "'^'. For instance, -r A^AGCTT for HindIII "
-                              "digestion. Several restriction enzyme can be "
-                              "specified."))
-    parser.add_argument('-o', '--out', default=None)
+    parser.add_argument("fastafile")
+    parser.add_argument(
+        "-r",
+        "--restriction_sites",
+        dest="res_sites",
+        nargs="+",
+        help=(
+            "The cutting position has to be specified using "
+            "'^'. For instance, -r A^AGCTT for HindIII "
+            "digestion. Several restriction enzyme can be "
+            "specified."
+        ),
+    )
+    parser.add_argument("-o", "--out", default=None)
     args = parser.parse_args()
 
     filename = args.fastafile
     out = args.out
-    
+
     # Split restriction sites if comma-separated
-    cutsites=[]
+    cutsites = []
     for s in args.res_sites:
-        for m in s.split(','):
+        for m in s.split(","):
             cutsites.append(m)
-                
+
     # process args and get restriction enzyme sequences
     sequences = []
     offset = []
     for cs in cutsites:
         if cs.lower() in RE_cutsite:
-            cseq = ''.join(RE_cutsite[cs.lower()])
+            cseq = "".join(RE_cutsite[cs.lower()])
         else:
             cseq = cs
 
-        offpos = int(cseq.find('^'))
+        offpos = int(cseq.find("^"))
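+        # The '^' marks the enzyme cutting position; its index within the motif is the offset applied to each matched site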
         if offpos == -1:
-            print("Unable to detect offset for {}. Please, use '^' to specify the cutting position,\
-                   i.e A^GATCT for HindIII digestion.".format(cseq))
+            print(
+                "Unable to detect offset for {}. Please, use '^' to specify the cutting position,\
+                   i.e. A^AGCTT for HindIII digestion.".format(
+                    cseq
+                )
+            )
             sys.exit(-1)
 
         for nuc in list(set(cs)):
-            if nuc not in ['A','T','G','C','N','^']:
+            if nuc not in ["A", "T", "G", "C", "N", "^"]:
                 print("Find unexpected character ['{}']in restriction motif".format(nuc))
                 print("Note that multiple motifs should be separated by a space (not a comma !)")
 
                 sys.exit(-1)
 
         offset.append(offpos)
-        sequences.append(re.sub('\^', '', cseq))
+        sequences.append(re.sub(r"\^", "", cseq))
 
     # replace all N in restriction motif
     sequences_without_N = []
@@ -158,32 +161,32 @@ if __name__ == "__main__":
         nrs = replaceN(sequences[rs])
         sequences_without_N = sequences_without_N + nrs
         offset_without_N = offset_without_N + [offset[rs]] * len(nrs)
-          
+
     sequences = sequences_without_N
     offset = offset_without_N
-    
+
     if out is None:
         out = os.path.splitext(filename)[0] + "_fragments.bed"
 
     print("Analyzing", filename)
     print("Restriction site(s)", ",".join(sequences))
-    print("Offset(s)",  ','.join(str(x) for x in offset))
+    print("Offset(s)", ",".join(str(x) for x in offset))
 
     # Read fasta file and look for rs per chromosome
-    contig_names, all_indices = find_re_sites(filename, sequences,  offset=offset)
+    contig_names, all_indices = find_re_sites(filename, sequences, offset=offset)
     _, lengths = find_chromsomose_lengths(filename)
 
     valid_fragments = []
     for i, indices in enumerate(all_indices):
         valid_fragments_chr = np.concatenate(
-            [np.concatenate([[0], indices])[:, np.newaxis],
-             np.concatenate([indices, [lengths[i]]])[:, np.newaxis]],
-            axis=1)
+            [np.concatenate([[0], indices])[:, np.newaxis], np.concatenate([indices, [lengths[i]]])[:, np.newaxis]],
+            axis=1,
+        )
         valid_fragments.append(valid_fragments_chr)
 
     # Write results
     print("Writing to {} ...".format(out))
-    with open(out, 'w') as outfile:
+    with open(out, "w") as outfile:
         for chrom_name, indices in zip(contig_names, valid_fragments):
             frag_id = 0
             for begin, end in indices:
@@ -192,4 +195,6 @@ if __name__ == "__main__":
                 if end > begin:
                     frag_id += 1
                     frag_name = "HIC_{}_{}".format(str(chrom_name), int(frag_id))
-                    outfile.write("{}\t{}\t{}\t{}\t0\t+\n".format(str(chrom_name), int(begin), int(end), str(frag_name)))
+                    outfile.write(
+                        "{}\t{}\t{}\t{}\t0\t+\n".format(str(chrom_name), int(begin), int(end), str(frag_name))
+                    )
diff --git a/bin/mapped_2hic_dnase.py b/bin/mapped_2hic_dnase.py
index dd023b0023e0c0a7aa4780bcc04289e467ed877b..ff593666f03c5b82928ced06b941be296d6169ea 100755
--- a/bin/mapped_2hic_dnase.py
+++ b/bin/mapped_2hic_dnase.py
@@ -25,8 +25,12 @@ def usage():
     print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
     print("[-o/--outputDir] <Output directory. Default is current directory>")
     print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
-    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
-    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print(
+        "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>"
+    )
+    print(
+        "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>"
+    )
     print("[-v/--verbose] <Verbose>")
     print("[-h/--help] <Help>")
     return
@@ -38,8 +42,8 @@ def get_args():
         opts, args = getopt.getopt(
             sys.argv[1:],
             "r:o:d:g:avh",
-            ["mappedReadsFile=",
-             "outputDir=", "minDist=", "gatg", "all", "verbose", "help"])
+            ["mappedReadsFile=", "outputDir=", "minDist=", "gatg", "all", "verbose", "help"],
+        )
     except getopt.GetoptError:
         usage()
         sys.exit(-1)
@@ -78,8 +82,8 @@ def get_read_pos(read, st="start"):
         list of aligned reads
     """
     if st == "middle":
-        pos = read.reference_start + int(read.alen/2)
-    elif st =="start":
+        pos = read.reference_start + int(read.alen / 2)
+    elif st == "start":
         pos = get_read_start(read)
     elif st == "left":
         pos = read.reference_start
@@ -88,11 +92,11 @@ def get_read_pos(read, st="start"):
 
 
 def get_read_start(read):
-    """                                                                                                                                                                                                        
-    Return the 5' end of the read                                                                                                                                                                              
+    """
+    Return the 5' end of the read
     """
     if read.is_reverse:
-        pos = read.reference_start + read.alen -1
+        pos = read.reference_start + read.alen - 1
     else:
         pos = read.reference_start
     return pos
@@ -125,7 +129,7 @@ def get_ordered_reads(read1, read2):
 def isIntraChrom(read1, read2):
     """
     Return true is the reads pair is intrachromosomal
-    
+
     read1 : [AlignedRead]
     read2 : [AlignedRead]
 
@@ -163,23 +167,23 @@ def get_valid_orientation(read1, read2):
 
 
 def get_cis_dist(read1, read2):
-     """
-     Calculte the size of the DNA fragment library
+    """
+    Calculate the size of the DNA fragment library
 
-     read1 : [AlignedRead]
-     read2 : [AlignedRead]
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
 
-     """
-     # Get oriented reads
-     ##r1, r2 = get_ordered_reads(read1, read2)
-     dist = None
-     if not r1.is_unmapped and not r2.is_unmapped:         
-         ## Contact distances can be calculated for intrachromosomal reads only
-         if isIntraChrom(read1, read2):
-             r1pos = get_read_pos(read1)
-             r2pos = get_read_pos(read2)
-             dist = abs(r1pos - r2pos)
-     return dist
+    """
+    # Get oriented reads
+    ##r1, r2 = get_ordered_reads(read1, read2)
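+    # Re-ordering the pair is not required here: abs() below makes the distance independent of read order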
+    dist = None
+    if not read1.is_unmapped and not read2.is_unmapped:
+        ## Contact distances can be calculated for intrachromosomal reads only
+        if isIntraChrom(read1, read2):
+            r1pos = get_read_pos(read1)
+            r2pos = get_read_pos(read2)
+            dist = abs(r1pos - r2pos)
+    return dist
 
 
 def get_read_tag(read, tag):
@@ -255,15 +259,15 @@ if __name__ == "__main__":
     CF_ascounter = 0
 
     baseReadsFile = os.path.basename(mappedReadsFile)
-    baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
+    baseReadsFile = re.sub(r"\.bam$|\.sam$", "", baseReadsFile)
 
     # Open handlers for output files
-    handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
+    handle_valid = open(outputDir + "/" + baseReadsFile + ".validPairs", "w")
 
     if allOutput:
-        handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
-        handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs','w')
-        handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs','w')
+        handle_dump = open(outputDir + "/" + baseReadsFile + ".DumpPairs", "w")
+        handle_single = open(outputDir + "/" + baseReadsFile + ".SinglePairs", "w")
+        handle_filt = open(outputDir + "/" + baseReadsFile + ".FiltPairs", "w")
 
     # Read the SAM/BAM file
     if verbose:
@@ -306,7 +310,7 @@ if __name__ == "__main__":
                 cur_handler = handle_single if allOutput else None
 
             # Check Distance criteria - Filter
-            if (minDist is not None and dist is not None and dist < int(minDist)):
+            if minDist is not None and dist is not None and dist < int(minDist):
                 interactionType = "FILT"
                 filt_counter += 1
                 cur_handler = handle_filt if allOutput else None
@@ -330,13 +334,11 @@ if __name__ == "__main__":
                     dump_counter += 1
                     cur_handler = handle_dump if allOutput else None
 
-
-
             # Split valid pairs based on XA tag
             if gtag is not None:
                 r1as = get_read_tag(r1, gtag)
                 r2as = get_read_tag(r2, gtag)
-                        
+
                 if r1as == 1 and r2as == 1:
                     G1G1_ascounter += 1
                 elif r1as == 2 and r2as == 2:
@@ -357,11 +359,10 @@ if __name__ == "__main__":
                     CF_ascounter += 1
                 else:
                     UU_ascounter += 1
-                        
-       
+
             if cur_handler is not None:
                 if not r1.is_unmapped and not r2.is_unmapped:
-                    
+
                     ##reorient reads to ease duplicates removal
                     or1, or2 = get_ordered_reads(r1, r2)
                     or1_chrom = samfile.get_reference_name(or1.reference_id)
@@ -371,53 +372,93 @@ if __name__ == "__main__":
                     r1as = get_read_tag(or1, gtag)
                     r2as = get_read_tag(or2, gtag)
                     if gtag is not None:
-                        htag = str(r1as)+"-"+str(r2as)
-                        
+                        htag = str(r1as) + "-" + str(r2as)
+
                     cur_handler.write(
-                        or1.query_name + "\t" +
-                        or1_chrom + "\t" +
-                        str(get_read_pos(or1)+1) + "\t" +
-                        str(get_read_strand(or1)) + "\t" +
-                        or2_chrom + "\t" +
-                        str(get_read_pos(or2)+1) + "\t" +
-                        str(get_read_strand(or2)) + "\t" +
-                        "NA" + "\t" + ##dist 
-                        "NA" + "\t" + ##resfrag1
-                        "NA" + "\t" + ##resfrag2
-                        str(or1.mapping_quality) + "\t" + 
-                        str(or2.mapping_quality) + "\t" + 
-                        str(htag) + "\n")
-                
+                        or1.query_name
+                        + "\t"
+                        + or1_chrom
+                        + "\t"
+                        + str(get_read_pos(or1) + 1)
+                        + "\t"
+                        + str(get_read_strand(or1))
+                        + "\t"
+                        + or2_chrom
+                        + "\t"
+                        + str(get_read_pos(or2) + 1)
+                        + "\t"
+                        + str(get_read_strand(or2))
+                        + "\t"
+                        + "NA"
+                        + "\t"
+                        + "NA"  ##dist
+                        + "\t"
+                        + "NA"  ##resfrag1
+                        + "\t"
+                        + str(or1.mapping_quality)  ##resfrag2
+                        + "\t"
+                        + str(or2.mapping_quality)
+                        + "\t"
+                        + str(htag)
+                        + "\n"
+                    )
+
                 elif r2.is_unmapped and not r1.is_unmapped:
                     cur_handler.write(
-                        r1.query_name + "\t" +
-                        r1_chrom + "\t" +
-                        str(get_read_pos(r1)+1) + "\t" +
-                        str(get_read_strand(r1)) + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" + 
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        str(r1.mapping_quality) + "\t" + 
-                        "*" + "\n")
+                        r1.query_name
+                        + "\t"
+                        + r1_chrom
+                        + "\t"
+                        + str(get_read_pos(r1) + 1)
+                        + "\t"
+                        + str(get_read_strand(r1))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r1.mapping_quality)
+                        + "\t"
+                        + "*"
+                        + "\n"
+                    )
                 elif r1.is_unmapped and not r2.is_unmapped:
                     cur_handler.write(
-                        r2.query_name + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        r2_chrom + "\t" +
-                        str(get_read_pos(r2)+1) + "\t" +
-                        str(get_read_strand(r2)) + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" + 
-                        str(r2.mapping_quality) + "\n")
-
-            if (reads_counter % 100000 == 0 and verbose):
+                        r2.query_name
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_chrom
+                        + "\t"
+                        + str(get_read_pos(r2) + 1)
+                        + "\t"
+                        + str(get_read_strand(r2))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r2.mapping_quality)
+                        + "\n"
+                    )
+
+            if reads_counter % 100000 == 0 and verbose:
                 print("##", reads_counter)
 
     # Close handler
@@ -428,7 +469,7 @@ if __name__ == "__main__":
         handle_filt.close()
 
     # Write stats file
-    with open(outputDir + '/' + baseReadsFile + '.RSstat', 'w') as handle_stat:
+    with open(outputDir + "/" + baseReadsFile + ".RSstat", "w") as handle_stat:
         handle_stat.write("## Hi-C processing - no restriction fragments\n")
         handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
         handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
@@ -439,17 +480,24 @@ if __name__ == "__main__":
         handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
         handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
 
-    ## Write AS report
+        ## Write AS report
         if gtag is not None:
             handle_stat.write("## ======================================\n")
             handle_stat.write("## Allele specific information\n")
             handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
-            handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
+            handle_stat.write(
+                "Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t"
+                + str(UG1_ascounter + G1U_ascounter)
+                + "\n"
+            )
             handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
-            handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
-            handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
+            handle_stat.write(
+                "Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t"
+                + str(UG2_ascounter + G2U_ascounter)
+                + "\n"
+            )
+            handle_stat.write(
+                "Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n"
+            )
             handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
             handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
-
-
-
diff --git a/bin/mapped_2hic_fragments.py b/bin/mapped_2hic_fragments.py
index e823ee02cce862b704c2b6939d1642db579665be..cc0e40b472dfb764ba1bf2f550c00c79bce7bd3f 100755
--- a/bin/mapped_2hic_fragments.py
+++ b/bin/mapped_2hic_fragments.py
@@ -32,8 +32,12 @@ def usage():
     print("[-t/--shortestFragmentLength] <Shortest restriction fragment length to consider>")
     print("[-m/--longestFragmentLength] <Longest restriction fragment length to consider>")
     print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
-    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
-    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print(
+        "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>"
+    )
+    print(
+        "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>"
+    )
     print("[-S/--sam] <Output an additional SAM file with flag 'CT' for pairs classification>")
     print("[-v/--verbose] <Verbose>")
     print("[-h/--help] <Help>")
@@ -46,13 +50,22 @@ def get_args():
         opts, args = getopt.getopt(
             sys.argv[1:],
             "f:r:o:s:l:t:m:d:g:Svah",
-            ["fragmentFile=",
-             "mappedReadsFile=",
-             "outputDir=", 
-             "minInsertSize=", "maxInsertSize", 
-             "minFragSize", "maxFragSize", 
-             "minDist",
-             "gatg", "sam", "verbose", "all", "help"])
+            [
+                "fragmentFile=",
+                "mappedReadsFile=",
+                "outputDir=",
+                "minInsertSize=",
+                "maxInsertSize",
+                "minFragSize",
+                "maxFragSize",
+                "minDist",
+                "gatg",
+                "sam",
+                "verbose",
+                "all",
+                "help",
+            ],
+        )
     except getopt.GetoptError:
         usage()
         sys.exit(-1)
@@ -66,7 +79,7 @@ def timing(function, *args):
     """
     startTime = time.time()
     result = function(*args)
-    print('{} function took {:.3f}ms'.format(function.__name__, (time.time() - startTime) * 1000))
+    print("{} function took {:.3f}ms".format(function.__name__, (time.time() - startTime) * 1000))
     return result
 
 
@@ -88,7 +101,7 @@ def get_read_strand(read):
 def isIntraChrom(read1, read2):
     """
     Return true is the reads pair is intrachromosomal
-    
+
     read1 : [AlignedRead]
     read2 : [AlignedRead]
 
@@ -99,22 +112,22 @@ def isIntraChrom(read1, read2):
 
 
 def get_cis_dist(read1, read2):
-     """
-     Calculte the contact distance between two intrachromosomal reads
+    """
+    Calculate the contact distance between two intrachromosomal reads
 
-     read1 : [AlignedRead]
-     read2 : [AlignedRead]
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
 
-     """
-     # Get oriented reads
-     ##r1, r2 = get_ordered_reads(read1, read2)
-     dist = None
-     if not read1.is_unmapped and not read2.is_unmapped:         
-         ## Contact distances can be calculated for intrachromosomal reads only
-         if isIntraChrom(read1, read2):
-             r1pos, r2pos = get_read_pos(read1), get_read_pos(read2)
-             dist = abs(r1pos - r2pos)
-     return dist
+    """
+    # Get oriented reads
+    ##r1, r2 = get_ordered_reads(read1, read2)
+    dist = None
+    if not read1.is_unmapped and not read2.is_unmapped:
+        ## Contact distances can be calculated for intrachromosomal reads only
+        if isIntraChrom(read1, read2):
+            r1pos, r2pos = get_read_pos(read1), get_read_pos(read2)
+            dist = abs(r1pos - r2pos)
+    return dist
 
 
 def get_read_pos(read, st="start"):
@@ -135,12 +148,12 @@ def get_read_pos(read, st="start"):
     """
 
     if st == "middle":
-        pos = read.reference_start + int(read.alen/2)
-    elif st =="start":
+        pos = read.reference_start + int(read.alen / 2)
+    elif st == "start":
         pos = get_read_start(read)
     elif st == "left":
         pos = read.reference_start
-    
+
     return pos
 
 
@@ -149,11 +162,12 @@ def get_read_start(read):
     Return the 5' end of the read
     """
     if read.is_reverse:
-        pos = read.reference_start + read.alen -1
+        pos = read.reference_start + read.alen - 1
     else:
         pos = read.reference_start
     return pos
 
+
 def get_ordered_reads(read1, read2):
     """
     Reorient reads
@@ -183,9 +197,10 @@ def get_ordered_reads(read1, read2):
             r1, r2 = read1, read2
         else:
             r1, r2 = read2, read1
-                
+
     return r1, r2
 
+
 def load_restriction_fragment(in_file, minfragsize=None, maxfragsize=None, verbose=False):
     """
     Read a BED file and store the intervals in a tree
@@ -204,37 +219,37 @@ def load_restriction_fragment(in_file, minfragsize=None, maxfragsize=None, verbo
     nline = 0
     nfilt = 0
     for line in bed_handle:
-         nline += 1
-         bedtab = line.split("\t")
-         try:
-              chromosome, start, end, name = bedtab[:4]
-         except ValueError:
-              print("Warning : wrong input format in line {}. Not a BED file ?!".format(nline))
-              continue
+        nline += 1
+        bedtab = line.split("\t")
+        try:
+            chromosome, start, end, name = bedtab[:4]
+        except ValueError:
+            print("Warning : wrong input format in line {}. Not a BED file ?!".format(nline))
+            continue
 
         # BED files are zero-based as Intervals objects
-         start = int(start)  # + 1
-         end = int(end)
-         fragl = abs(end - start)
-         name = name.strip()
-
-         ## Discard fragments outside the size range
-         filt = False
-         if minfragsize != None and int(fragl) < int(minfragsize):
-             nfilt += 1
-             filt = True
-         elif maxfragsize != None and int(fragl) > int(maxfragsize):
-             nfilt += 1
-             filt = True
-       
-         if chromosome in resFrag:
-             tree = resFrag[chromosome]
-             tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
-         else:
-             tree = Intersecter()
-             tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
-             resFrag[chromosome] = tree
-    
+        start = int(start)  # + 1
+        end = int(end)
+        fragl = abs(end - start)
+        name = name.strip()
+
+        ## Discard fragments outside the size range
+        filt = False
+        if minfragsize != None and int(fragl) < int(minfragsize):
+            nfilt += 1
+            filt = True
+        elif maxfragsize != None and int(fragl) > int(maxfragsize):
+            nfilt += 1
+            filt = True
+
+        if chromosome in resFrag:
+            tree = resFrag[chromosome]
+            tree.add_interval(Interval(start, end, value={"name": name, "filter": filt}))
+        else:
+            tree = Intersecter()
+            tree.add_interval(Interval(start, end, value={"name": name, "filter": filt}))
+            resFrag[chromosome] = tree
+
     if nfilt > 0:
         print("Warning : {} fragment(s) outside of range and discarded. {} remaining.".format(nfilt, nline - nfilt))
     bed_handle.close()
@@ -253,10 +268,10 @@ def get_overlapping_restriction_fragment(resFrag, chrom, read):
     """
     # Get read position (middle or start)
     pos = get_read_pos(read, st="middle")
-    
+
     if chrom in resFrag:
         # Overlap with the position of the read (zero-based)
-        resfrag = resFrag[chrom].find(pos, pos+1)
+        resfrag = resFrag[chrom].find(pos, pos + 1)
         if len(resfrag) > 1:
             print("Warning : {} restictions fragments found for {} -skipped".format(len(resfrag), read.query_name))
             return None
@@ -271,21 +286,22 @@ def get_overlapping_restriction_fragment(resFrag, chrom, read):
 
 
 def are_contiguous_fragments(frag1, frag2, chr1, chr2):
-    '''
+    """
     Compare fragment positions to check if they are contiguous
-    '''
+    """
     ret = False
     if chr1 == chr2:
         if int(frag1.start) < int(frag2.start):
             d = int(frag2.start) - int(frag1.end)
         else:
             d = int(frag1.start) - int(frag2.end)
-            
+
         if d == 0:
             ret = True
-    
+
     return ret
 
+
 def is_religation(read1, read2, frag1, frag2):
     """
     Reads are expected to map adjacent fragments
@@ -294,8 +310,8 @@ def is_religation(read1, read2, frag1, frag2):
     """
     ret = False
     if are_contiguous_fragments(frag1, frag2, read1.tid, read2.tid):
-        #r1, r2 = get_ordered_reads(read1, read2)
-        #if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        # r1, r2 = get_ordered_reads(read1, read2)
+        # if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
         ret = True
     return ret
 
@@ -405,8 +421,7 @@ def get_PE_fragment_size(read1, read2, resFrag1, resFrag2, interactionType):
     return fragmentsize
 
 
-def get_interaction_type(read1, read1_chrom, resfrag1, read2,
-                         read2_chrom, resfrag2, verbose):
+def get_interaction_type(read1, read1_chrom, resfrag1, read2, read2_chrom, resfrag2, verbose):
     """
     Returns the interaction type
 
@@ -433,7 +448,7 @@ def get_interaction_type(read1, read1_chrom, resfrag1, read2,
     # If returned InteractionType=None -> Same restriction fragment
     # and same strand = Dump
     interactionType = None
-      
+
     if not read1.is_unmapped and not read2.is_unmapped and resfrag1 is not None and resfrag2 is not None:
         # same restriction fragment
         if resfrag1 == resfrag2:
@@ -549,29 +564,29 @@ if __name__ == "__main__":
     CF_ascounter = 0
 
     baseReadsFile = os.path.basename(mappedReadsFile)
-    baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
+    baseReadsFile = re.sub(r"\.bam$|\.sam$", "", baseReadsFile)
 
     # Open handlers for output files
-    handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
+    handle_valid = open(outputDir + "/" + baseReadsFile + ".validPairs", "w")
 
     if allOutput:
-        handle_de = open(outputDir + '/' + baseReadsFile + '.DEPairs', 'w')
-        handle_re = open(outputDir + '/' + baseReadsFile + '.REPairs', 'w')
-        handle_sc = open(outputDir + '/' + baseReadsFile + '.SCPairs', 'w')
-        handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
-        handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs', 'w')
-        handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs', 'w')
+        handle_de = open(outputDir + "/" + baseReadsFile + ".DEPairs", "w")
+        handle_re = open(outputDir + "/" + baseReadsFile + ".REPairs", "w")
+        handle_sc = open(outputDir + "/" + baseReadsFile + ".SCPairs", "w")
+        handle_dump = open(outputDir + "/" + baseReadsFile + ".DumpPairs", "w")
+        handle_single = open(outputDir + "/" + baseReadsFile + ".SinglePairs", "w")
+        handle_filt = open(outputDir + "/" + baseReadsFile + ".FiltPairs", "w")
 
     # Read the BED file
     resFrag = timing(load_restriction_fragment, fragmentFile, minFragSize, maxFragSize, verbose)
-     
+
     # Read the SAM/BAM file
     if verbose:
         print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
     samfile = pysam.Samfile(mappedReadsFile, "rb")
 
     if samOut:
-        handle_sam = pysam.AlignmentFile(outputDir + '/' + baseReadsFile + '_interaction.bam', "wb", template=samfile)
+        handle_sam = pysam.AlignmentFile(outputDir + "/" + baseReadsFile + "_interaction.bam", "wb", template=samfile)
 
     # Reads are 0-based too (for both SAM and BAM format)
     # Loop on all reads
@@ -608,22 +623,24 @@ if __name__ == "__main__":
                 interactionType = get_interaction_type(r1, r1_chrom, r1_resfrag, r2, r2_chrom, r2_resfrag, verbose)
                 dist = get_PE_fragment_size(r1, r2, r1_resfrag, r2_resfrag, interactionType)
                 cdist = get_cis_dist(r1, r2)
-                
+
                 ## Filter based on restriction fragments
-                if (r1_resfrag is not None and r1_resfrag.value['filter'] == True) or (r2_resfrag is not None and r2_resfrag.value['filter']) == True:
+                if (r1_resfrag is not None and r1_resfrag.value["filter"] == True) or (
+                    r2_resfrag is not None and r2_resfrag.value["filter"]
+                ) == True:
                     interactionType = "FILT"
-   
+
                 # Check Insert size criteria - FILT
-                if (minInsertSize is not None and dist is not None and
-                    dist < int(minInsertSize)) or \
-                    (maxInsertSize is not None and dist is not None and dist > int(maxInsertSize)):
+                if (minInsertSize is not None and dist is not None and dist < int(minInsertSize)) or (
+                    maxInsertSize is not None and dist is not None and dist > int(maxInsertSize)
+                ):
                     interactionType = "FILT"
 
                 # Check Distance criteria - FILT
                 # Done for VI otherwise this criteria will overwrite all other invalid classification
-                if (interactionType == "VI" and minDist is not None and cdist is not None and cdist < int(minDist)):
+                if interactionType == "VI" and minDist is not None and cdist is not None and cdist < int(minDist):
                     interactionType = "FILT"
-        
+
                 if interactionType == "VI":
                     valid_counter += 1
                     cur_handler = handle_valid
@@ -677,11 +694,11 @@ if __name__ == "__main__":
                 elif interactionType == "SI":
                     single_counter += 1
                     cur_handler = handle_single if allOutput else None
-                
+
                 elif interactionType == "FILT":
                     filt_counter += 1
                     cur_handler = handle_filt if allOutput else None
-                
+
                 else:
                     interactionType = "DUMP"
                     dump_counter += 1
@@ -694,17 +711,17 @@ if __name__ == "__main__":
 
             ## Write results in right handler
             if cur_handler is not None:
-                if not r1.is_unmapped and not r2.is_unmapped:                 
+                if not r1.is_unmapped and not r2.is_unmapped:
                     ##reorient reads to ease duplicates removal
                     or1, or2 = get_ordered_reads(r1, r2)
                     or1_chrom = samfile.get_reference_name(or1.tid)
                     or2_chrom = samfile.get_reference_name(or2.tid)
-                    
+
                     ##reset as tag now that the reads are oriented
                     r1as = get_read_tag(or1, gtag)
                     r2as = get_read_tag(or2, gtag)
                     if gtag is not None:
-                        htag = str(r1as)+"-"+str(r2as)
+                        htag = str(r1as) + "-" + str(r2as)
 
                     ##get fragment name and reorient if necessary
                     if or1 == r1 and or2 == r2:
@@ -715,73 +732,113 @@ if __name__ == "__main__":
                         or2_resfrag = r1_resfrag
 
                     if or1_resfrag is not None:
-                        or1_fragname = or1_resfrag.value['name']
+                        or1_fragname = or1_resfrag.value["name"]
                     else:
-                        or1_fragname = 'None'
-                        
+                        or1_fragname = "None"
+
                     if or2_resfrag is not None:
-                        or2_fragname = or2_resfrag.value['name']
+                        or2_fragname = or2_resfrag.value["name"]
                     else:
-                        or2_fragname = 'None'
-                        
+                        or2_fragname = "None"
+
                     cur_handler.write(
-                        or1.query_name + "\t" +
-                        or1_chrom + "\t" +
-                        str(get_read_pos(or1)+1) + "\t" +
-                        str(get_read_strand(or1)) + "\t" +
-                        or2_chrom + "\t" +
-                        str(get_read_pos(or2)+1) + "\t" +
-                        str(get_read_strand(or2)) + "\t" +
-                        str(dist) + "\t" + 
-                        or1_fragname + "\t" +
-                        or2_fragname + "\t" +
-                        str(or1.mapping_quality) + "\t" + 
-                        str(or2.mapping_quality) + "\t" + 
-                        str(htag) + "\n")
+                        or1.query_name
+                        + "\t"
+                        + or1_chrom
+                        + "\t"
+                        + str(get_read_pos(or1) + 1)
+                        + "\t"
+                        + str(get_read_strand(or1))
+                        + "\t"
+                        + or2_chrom
+                        + "\t"
+                        + str(get_read_pos(or2) + 1)
+                        + "\t"
+                        + str(get_read_strand(or2))
+                        + "\t"
+                        + str(dist)
+                        + "\t"
+                        + or1_fragname
+                        + "\t"
+                        + or2_fragname
+                        + "\t"
+                        + str(or1.mapping_quality)
+                        + "\t"
+                        + str(or2.mapping_quality)
+                        + "\t"
+                        + str(htag)
+                        + "\n"
+                    )
 
                 elif r2.is_unmapped and not r1.is_unmapped:
                     if r1_resfrag is not None:
-                        r1_fragname = r1_resfrag.value['name']
-                          
+                        r1_fragname = r1_resfrag.value["name"]
+
                     cur_handler.write(
-                        r1.query_name + "\t" +
-                        r1_chrom + "\t" +
-                        str(get_read_pos(r1)+1) + "\t" +
-                        str(get_read_strand(r1)) + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" + 
-                        r1_fragname + "\t" +
-                        "*" + "\t" +
-                        str(r1.mapping_quality) + "\t" + 
-                        "*" + "\n")
+                        r1.query_name
+                        + "\t"
+                        + r1_chrom
+                        + "\t"
+                        + str(get_read_pos(r1) + 1)
+                        + "\t"
+                        + str(get_read_strand(r1))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r1_fragname
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r1.mapping_quality)
+                        + "\t"
+                        + "*"
+                        + "\n"
+                    )
                 elif r1.is_unmapped and not r2.is_unmapped:
                     if r2_resfrag is not None:
-                        r2_fragname = r2_resfrag.value['name']
-                    
+                        r2_fragname = r2_resfrag.value["name"]
+
                     cur_handler.write(
-                        r2.query_name + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        r2_chrom + "\t" +
-                        str(get_read_pos(r2)+1) + "\t" +
-                        str(get_read_strand(r2)) + "\t" +
-                        "*" + "\t" +
-                        "*" + "\t" +
-                        r2_fragname + "\t" +
-                        "*" + "\t" +
-                        str(r2.mapping_quality) + "\n")
-
-                ## Keep initial order    
+                        r2.query_name
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_chrom
+                        + "\t"
+                        + str(get_read_pos(r2) + 1)
+                        + "\t"
+                        + str(get_read_strand(r2))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_fragname
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r2.mapping_quality)
+                        + "\n"
+                    )
+
+                ## Keep initial order
                 if samOut:
-                    r1.tags = r1.tags + [('CT', str(interactionType))]
-                    r2.tags = r2.tags + [('CT', str(interactionType))]
+                    r1.tags = r1.tags + [("CT", str(interactionType))]
+                    r2.tags = r2.tags + [("CT", str(interactionType))]
                     handle_sam.write(r1)
                     handle_sam.write(r2)
 
-            if (reads_counter % 100000 == 0 and verbose):
+            if reads_counter % 100000 == 0 and verbose:
                 print("##", reads_counter)
 
     # Close handler
@@ -794,9 +851,8 @@ if __name__ == "__main__":
         handle_single.close()
         handle_filt.close()
 
-
     # Write stats file
-    handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
+    handle_stat = open(outputDir + "/" + baseReadsFile + ".RSstat", "w")
     handle_stat.write("## Hi-C processing\n")
     handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
     handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
@@ -815,10 +871,20 @@ if __name__ == "__main__":
         handle_stat.write("## ======================================\n")
         handle_stat.write("## Allele specific information\n")
         handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
+        handle_stat.write(
+            "Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t"
+            + str(UG1_ascounter + G1U_ascounter)
+            + "\n"
+        )
         handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
+        handle_stat.write(
+            "Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t"
+            + str(UG2_ascounter + G2U_ascounter)
+            + "\n"
+        )
+        handle_stat.write(
+            "Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n"
+        )
         handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
         handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
 
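Functionally, the reformatted `load_restriction_fragment` and `get_overlapping_restriction_fragment` above are unchanged: fragments are stored per chromosome in an interval tree, and a read is assigned to the fragment overlapping its middle position via `find(pos, pos + 1)` on zero-based, half-open coordinates. The sketch below illustrates that lookup in isolation; it assumes `Intersecter` and `Interval` come from bx-python's `bx.intervals.intersection` module (the import is outside this diff), and the fragment names are made up.

```python
# Minimal sketch of the fragment-assignment lookup used above.
# Assumption: Intersecter/Interval come from bx-python (import not shown in the diff).
from bx.intervals.intersection import Intersecter, Interval

tree = Intersecter()
# BED intervals are zero-based and half-open, as in load_restriction_fragment().
tree.add_interval(Interval(0, 1000, value={"name": "HIC_chr1_1", "filter": False}))
tree.add_interval(Interval(1000, 2500, value={"name": "HIC_chr1_2", "filter": False}))

pos = 1200  # middle position of a mapped read
hits = tree.find(pos, pos + 1)
if len(hits) == 1:
    print("Read assigned to fragment", hits[0].value["name"])
elif len(hits) > 1:
    print("Warning: overlapping fragments, read skipped")  # mirrors the warning above
```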
diff --git a/bin/mergeSAM.py b/bin/mergeSAM.py
index a907fd77438307ffc808ce7d5ac0d7684c22f5f8..82ab8c34d979240d12de57b1f2510a54a79d2ca9 100755
--- a/bin/mergeSAM.py
+++ b/bin/mergeSAM.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 ## HiC-Pro
-## Copyright (c) 2015 Institut Curie                               
+## Copyright (c) 2015 Institut Curie
 ## Author(s): Nicolas Servant, Eric Viara
 ## Contact: nicolas.servant@curie.fr
 ## This software is distributed without any guarantee under the terms of the BSD-3 licence.
@@ -20,6 +20,7 @@ import os
 import re
 import pysam
 
+
 def usage():
     """Usage function"""
     print("Usage : python mergeSAM.py")
@@ -41,10 +42,8 @@ def get_args():
         opts, args = getopt.getopt(
             sys.argv[1:],
             "f:r:o:q:smtvh",
-            ["forward=",
-             "reverse=",
-             "output=", "qual=", 
-             "single", "multi", "stat", "verbose", "help"])
+            ["forward=", "reverse=", "output=", "qual=", "single", "multi", "stat", "verbose", "help"],
+        )
     except getopt.GetoptError:
         usage()
         sys.exit(-1)
@@ -53,24 +52,26 @@ def get_args():
 
 def is_unique_bowtie2(read):
     ret = False
-    if not read.is_unmapped and read.has_tag('AS'):
-        if read.has_tag('XS'):
-            primary =  read.get_tag('AS')
-            secondary = read.get_tag('XS')
-            if (primary > secondary):
+    if not read.is_unmapped and read.has_tag("AS"):
+        if read.has_tag("XS"):
+            primary = read.get_tag("AS")
+            secondary = read.get_tag("XS")
+            if primary > secondary:
                 ret = True
         else:
             ret = True
     return ret
 
+
 ## Remove everything after "/" or " " in read's name
 def get_read_name(read):
     name = read.query_name
-    #return name.split("/",1)[0]
-    return re.split('/| ', name)[0]
+    # return name.split("/",1)[0]
+    return re.split("/| ", name)[0]
+
 
 def sam_flag(read1, read2, hr1, hr2):
-	
+
     f1 = read1.flag
     f2 = read2.flag
 
@@ -81,7 +82,7 @@ def sam_flag(read1, read2, hr1, hr2):
     if r2.is_unmapped == False:
         r2_chrom = hr2.get_reference_name(r2.reference_id)
     else:
-        r2_chrom="*"
+        r2_chrom = "*"
 
     ##Relevant bitwise flags (flag in an 11-bit binary number)
     ##1 The read is one of a pair
@@ -92,54 +93,53 @@ def sam_flag(read1, read2, hr1, hr2):
     ##32 The other mate in the paired-end alignment is aligned to the reverse reference strand
     ##64 The read is the first (#1) mate in a pair
     ##128 The read is the second (#2) mate in a pair
-  
-    ##The reads were mapped as single-end data, so should expect flags of 
+
+    ##The reads were mapped as single-end data, so should expect flags of
     ##0 (map to the '+' strand) or 16 (map to the '-' strand)
-    ##Output example: a paired-end read that aligns to the reverse strand 
+    ##Output example: a paired-end read that aligns to the reverse strand
     ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
-  
+
     if f1 & 0x4:
         f1 = f1 | 0x8
 
     if f2 & 0x4:
         f2 = f2 | 0x8
-    
-    if (not (f1 & 0x4) and not (f2 & 0x4)):
+
+    if not (f1 & 0x4) and not (f2 & 0x4):
         ##The flag should now indicate this is paired-end data
         f1 = f1 | 0x1
         f1 = f1 | 0x2
         f2 = f2 | 0x1
-        f2 = f2 | 0x2  
-    
+        f2 = f2 | 0x2
+
     ##Indicate if the pair is on the reverse strand
     if f1 & 0x10:
         f2 = f2 | 0x20
-  
+
     if f2 & 0x10:
         f1 = f1 | 0x20
-  
+
     ##Is this first or the second pair?
     f1 = f1 | 0x40
     f2 = f2 | 0x80
-  
+
     ##Insert the modified bitwise flags into the reads
     read1.flag = f1
     read2.flag = f2
-	
+
     ##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
-    #RNEXT
+    # RNEXT
     if r1_chrom == r2_chrom:
         read1.next_reference_id = r1.reference_id
         read2.next_reference_id = r1.reference_id
     else:
         read1.next_reference_id = r2.reference_id
         read2.next_reference_id = r1.reference_id
-    #PNEXT
+    # PNEXT
     read1.next_reference_start = read2.reference_start
     read2.next_reference_start = read1.reference_start
 
-    return(read1, read2)
-
+    return (read1, read2)
 
 
 if __name__ == "__main__":
@@ -196,13 +196,13 @@ if __name__ == "__main__":
     tot_pairs_counter = 0
     multi_pairs_counter = 0
     uniq_pairs_counter = 0
-    unmapped_pairs_counter = 0 
+    unmapped_pairs_counter = 0
     lowq_pairs_counter = 0
     multi_singles_counter = 0
     uniq_singles_counter = 0
     lowq_singles_counter = 0
 
-    #local_counter = 0
+    # local_counter = 0
     paired_reads_counter = 0
     singleton_counter = 0
     reads_counter = 0
@@ -213,31 +213,31 @@ if __name__ == "__main__":
     ## Loop on all reads
     if verbose:
         print("## Merging forward and reverse tags ...")
-    
-    with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2: 
+
+    with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2:
         if output == "-":
             outfile = pysam.AlignmentFile(output, "w", template=hr1)
         else:
             outfile = pysam.AlignmentFile(output, "wb", template=hr1)
-	
+
         for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
-            reads_counter +=1
-            if (reads_counter % 1000000 == 0 and verbose):
+            reads_counter += 1
+            if reads_counter % 1000000 == 0 and verbose:
                 print("##", reads_counter)
-                
+
             if get_read_name(r1) == get_read_name(r2):
                 ## both unmapped
                 if r1.is_unmapped == True and r2.is_unmapped == True:
                     unmapped_pairs_counter += 1
                     continue
-                    
+
                 ## both mapped
                 elif r1.is_unmapped == False and r2.is_unmapped == False:
                     ## quality
                     if mapq != None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
                         lowq_pairs_counter += 1
                         continue
-                 
+
                     ## Unique mapping
                     if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
                         uniq_pairs_counter += 1
@@ -253,7 +253,7 @@ if __name__ == "__main__":
                         continue
                     if r1.is_unmapped == False:  ## first end is mapped, second is not
                         ## quality
-                        if mapq != None and (r1.mapping_quality < int(mapq)): 
+                        if mapq != None and (r1.mapping_quality < int(mapq)):
                             lowq_singles_counter += 1
                             continue
                         ## Unique mapping
@@ -265,7 +265,7 @@ if __name__ == "__main__":
                                 continue
                     else:  ## second end is mapped, first is not
                         ## quality
-                        if mapq != None and (r2.mapping_quality < int(mapq)): 
+                        if mapq != None and (r2.mapping_quality < int(mapq)):
                             lowq_singles_counter += 1
                             continue
                         ## Unique mapping
@@ -276,34 +276,95 @@ if __name__ == "__main__":
                             if report_multi == False:
                                 continue
 
-                tot_pairs_counter += 1          
-                (r1, r2) = sam_flag(r1,r2, hr1, hr2)
+                tot_pairs_counter += 1
+                (r1, r2) = sam_flag(r1, r2, hr1, hr2)
 
                 ## Write output
                 outfile.write(r1)
                 outfile.write(r2)
-                
+
             else:
-                print("Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted.")
+                print(
+                    "Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted."
+                )
                 sys.exit(1)
 
         if stat:
-            if output == '-':
+            if output == "-":
                 statfile = "pairing.stat"
             else:
-                statfile = re.sub('\.bam$', '.pairstat', output)
-            with open(statfile, 'w') as handle_stat:
-                handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")  
-                handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
-                handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+                statfile = re.sub("\.bam$", ".pairstat", output)
+            with open(statfile, "w") as handle_stat:
+                handle_stat.write(
+                    "Total_pairs_processed\t"
+                    + str(reads_counter)
+                    + "\t"
+                    + str(round(float(reads_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unmapped_pairs\t"
+                    + str(unmapped_pairs_counter)
+                    + "\t"
+                    + str(round(float(unmapped_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Low_qual_pairs\t"
+                    + str(lowq_pairs_counter)
+                    + "\t"
+                    + str(round(float(lowq_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unique_paired_alignments\t"
+                    + str(uniq_pairs_counter)
+                    + "\t"
+                    + str(round(float(uniq_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Multiple_pairs_alignments\t"
+                    + str(multi_pairs_counter)
+                    + "\t"
+                    + str(round(float(multi_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Pairs_with_singleton\t"
+                    + str(singleton_counter)
+                    + "\t"
+                    + str(round(float(singleton_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Low_qual_singleton\t"
+                    + str(lowq_singles_counter)
+                    + "\t"
+                    + str(round(float(lowq_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unique_singleton_alignments\t"
+                    + str(uniq_singles_counter)
+                    + "\t"
+                    + str(round(float(uniq_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Multiple_singleton_alignments\t"
+                    + str(multi_singles_counter)
+                    + "\t"
+                    + str(round(float(multi_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Reported_pairs\t"
+                    + str(tot_pairs_counter)
+                    + "\t"
+                    + str(round(float(tot_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
     hr1.close()
     hr2.close()
     outfile.close()
-
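The only behavioural content in the `sam_flag` hunks above is bit arithmetic on the SAM FLAG fields listed in the comments (0x1 paired, 0x4/0x8 read/mate unmapped, 0x10/0x20 read/mate reverse strand, 0x40/0x80 first/second mate). A stripped-down sketch of the same arithmetic on plain integers, runnable on its own without pysam, is shown below; it mirrors the logic visible in the diff rather than being the pipeline's code.

```python
# Minimal sketch of the FLAG reconstruction performed by sam_flag() above,
# operating on plain integers instead of pysam reads.
def pair_flags(f1, f2):
    if f1 & 0x4:                 # read 1 unmapped
        f1 |= 0x8
    if f2 & 0x4:                 # read 2 unmapped
        f2 |= 0x8
    if not (f1 & 0x4) and not (f2 & 0x4):
        f1 |= 0x1 | 0x2          # mark both reads as a (properly) paired alignment
        f2 |= 0x1 | 0x2
    if f1 & 0x10:                # read 1 on reverse strand -> mate-reverse on read 2
        f2 |= 0x20
    if f2 & 0x10:
        f1 |= 0x20
    f1 |= 0x40                   # first mate
    f2 |= 0x80                   # second mate
    return f1, f2


# Single-end flags 16 ('-' strand) and 0 ('+' strand) become 83 and 163,
# matching the "flag 83 (= 64 + 16 + 2 + 1)" example in the comments above.
print(pair_flags(16, 0))  # (83, 163)
```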
diff --git a/bin/merge_statfiles.py b/bin/merge_statfiles.py
index dc11bf75d31973df86a0eaae0aa1c4b37e004e27..c3986e1e6534eef84c0d11a7e95ee608dc571de2 100755
--- a/bin/merge_statfiles.py
+++ b/bin/merge_statfiles.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 
 ## nf-core-hic
-## Copyright (c) 2020 Institut Curie                               
+## Copyright (c) 2020 Institut Curie
 ## Author(s): Nicolas Servant
 ## Contact: nicolas.servant@curie.fr
 ## This software is distributed without any guarantee under the terms of the BSD-3 licence.
@@ -17,6 +17,7 @@ import glob
 import os
 from collections import OrderedDict
 
+
 def num(s):
     try:
         return int(s)
@@ -26,30 +27,30 @@ def num(s):
 
 if __name__ == "__main__":
     ## Read command line arguments
-    parser = argparse.ArgumentParser()      
-    parser.add_argument("-f", "--files", help="List of input file(s)", type=str, nargs='+')
-    parser.add_argument("-v", "--verbose", help="verbose mode", action='store_true')
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-f", "--files", help="List of input file(s)", type=str, nargs="+")
+    parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
     args = parser.parse_args()
-               
+
     infiles = args.files
     li = len(infiles)
 
     if li > 0:
         if args.verbose:
             print("## merge_statfiles.py")
-            print("## Merging "+ str(li)+" files")
- 
+            print("## Merging " + str(li) + " files")
+
         ## Reading first file to get the template
         template = OrderedDict()
         if args.verbose:
-            print("## Use "+infiles[0]+" as template")
+            print("## Use " + infiles[0] + " as template")
         with open(infiles[0]) as f:
             for line in f:
                 if not line.startswith("#"):
                     lsp = line.strip().split("\t")
-                    data = map(num, lsp[1:len(lsp)])
+                    data = map(num, lsp[1 : len(lsp)])
                     template[str(lsp[0])] = list(data)
-                
+
         if len(template) == 0:
             print("Cannot find template files !")
             sys.exit(1)
@@ -63,20 +64,21 @@ if __name__ == "__main__":
                         if lsp[0] in template:
                             for i in list(range(1, len(lsp))):
                                 if isinstance(num(lsp[i]), int):
-                                    template[lsp[0]][i-1] += num(lsp[i])
+                                    template[lsp[0]][i - 1] += num(lsp[i])
                                 else:
-                                    template[lsp[0]][i-1] = round((template[lsp[0]][i-1] + num(lsp[i]))/2,3)
+                                    template[lsp[0]][i - 1] = round((template[lsp[0]][i - 1] + num(lsp[i])) / 2, 3)
                         else:
-                            sys.stderr.write("Warning : '"+lsp[0]+"' not found in template ["+infiles[fidx]+"]\n")
-                            
+                            sys.stderr.write(
+                                "Warning : '" + lsp[0] + "' not found in template [" + infiles[fidx] + "]\n"
+                            )
+
         ## Print template
         for x in template:
             sys.stdout.write(x)
             for y in template[x]:
-                sys.stdout.write("\t"+str(y))
+                sys.stdout.write("\t" + str(y))
             sys.stdout.write("\n")
 
     else:
         print("No files to merge - stop")
         sys.exit(1)
-
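The reformatted loop in `merge_statfiles.py` keeps the original merging rule: the first file provides the template, integer columns are then summed across the remaining files, and non-integer columns are averaged with the running value (rounded to three decimals). A self-contained sketch of that rule on in-memory rows is shown below; the labels and numbers are made up for illustration.

```python
# Minimal sketch of the merging rule shown above, applied to in-memory rows
# instead of stat files; labels and values are illustrative only.
def num(s):
    try:
        return int(s)
    except ValueError:
        return float(s)


def merge_rows(rows_per_file):
    template = {}
    for fidx, rows in enumerate(rows_per_file):
        for label, *values in rows:
            values = [num(v) for v in values]
            if fidx == 0:
                template[label] = values           # first file is the template
                continue
            for i, v in enumerate(values):
                if isinstance(v, int):
                    template[label][i] += v        # counts are summed
                else:                              # rates are averaged with the running value
                    template[label][i] = round((template[label][i] + v) / 2, 3)
    return template


rows_a = [("Valid_interaction_pairs", "100", "80.0")]
rows_b = [("Valid_interaction_pairs", "50", "60.0")]
print(merge_rows([rows_a, rows_b]))  # {'Valid_interaction_pairs': [150, 70.0]}
```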
diff --git a/conf/base.config b/conf/base.config
index e1895e3f5a6e7dfcc9b68b268225d7a8dd5376a7..6808dbe2205a3cbe53975d4d2d2ec2fbc1998ec7 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -24,6 +24,11 @@ process {
     //        If possible, it would be nice to keep the same label naming convention when
     //        adding in your local modules too.
     // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
+    withLabel:process_single {
+        cpus   = { check_max( 1                  , 'cpus'    ) }
+        memory = { check_max( 6.GB * task.attempt, 'memory'  ) }
+        time   = { check_max( 4.h  * task.attempt, 'time'    ) }
+    }
     withLabel:process_low {
         cpus   = { check_max( 2     * task.attempt, 'cpus'    ) }
         memory = { check_max( 4.GB  * task.attempt, 'memory'  ) }
diff --git a/conf/modules.config b/conf/modules.config
index 5b279c54cb9ec6fc7a5b127a9292a10316434e55..096a86006168e216e1f68863f65e4dd2d5d96c7d 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -163,6 +163,7 @@ process {
             mode: 'copy',
             enabled: params.hicpro_maps
         ]
+        ext.prefix = { "${meta.id}.${resolution}" }
     }
 
     withName: 'ICE_NORMALIZATION' {
@@ -193,6 +194,7 @@ process {
             saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
             mode: 'copy'
         ]
+        ext.prefix = { "cooler_bins_${cool_bin}" }
     }
 
     withName: 'COOLER_CLOAD' {
@@ -202,6 +204,7 @@ process {
             mode: 'copy',
             enabled : params.save_raw_maps
         ]
+        ext.prefix = { "${meta.id}.${cool_bin}" }
         ext.args = "pairs -c1 2 -p1 3 -c2 4 -p2 5"
     }
 
@@ -212,6 +215,7 @@ process {
             mode: 'copy'
         ]
         ext.args = '--force'
+        ext.prefix = { "${cool.baseName}_balanced" }
     }
 
     withName: 'COOLER_DUMP' {
@@ -250,25 +254,27 @@ process {
     //********************************
     // COMPARTMENTS
 
-    withName: 'CALL_COMPARTMENTS' {
+    withName: 'COOLTOOLS_EIGSCIS' {
         publishDir = [
             path: { "${params.outdir}/compartments/" },
             saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
             mode: 'copy'
         ]
         ext.args = '--bigwig'
+        ext.prefix = { "${meta.id}.${resolution}" }
     }
 
     //********************************
     // TADS
 
-    withName: 'INSULATION' {
+    withName: 'COOLTOOLS_INSULATION' {
         publishDir = [
             path: { "${params.outdir}/tads/insulation/" },
             saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
             mode: 'copy'
         ]
         ext.args = '15 25 50 --window-pixels'
+        ext.prefix = { "${cool.baseName}" }
     }
 
     withName: 'HIC_FIND_TADS' {
@@ -278,5 +284,6 @@ process {
             mode: 'copy'
         ]
         ext.args = '--correctForMultipleTesting fdr'
+        ext.prefix = { "${cool.baseName}" }
     }
 }
diff --git a/conf/test_full.config b/conf/test_full.config
index a9a8183f5a25829e8f323446234ac89ab6dc4353..f7b39d4fe2c22e98608b0ce00dec75274bde30ca 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -14,10 +14,14 @@ params {
     config_profile_description = 'Full test dataset to check pipeline function'
 
     // Input data for full size test
-    // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA)
-    // TODO nf-core: Give any required params for the test so that command line flags are not needed
-    input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_full_illumina_amplicon.csv'
+    input = 'https://raw.githubusercontent.com/nf-core/test-datasets/hic/samplesheet/samplesheet_HiC_mESC_full_test.csv'
 
     // Genome references
-    genome = 'R64-1-1'
+    genome = 'mm10'
+
+    // Other options
+    digestion = 'dpnii'
+    bin_size = '40000,250000,500000,1000000'
+    res_compartments = '500000,250000'
+    res_tads = '40000,20000'
 }
diff --git a/docs/usage.md b/docs/usage.md
index 79d033b9ffd226a478b7848748803ee62d1400fe..159daa6bd15ec41ac0368264c84f8995ff42e654 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -10,7 +10,7 @@
 
 You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below.
 
-```console
+```bash
 --input '[path to samplesheet file]'
 ```
 
@@ -54,7 +54,7 @@ An [example samplesheet](../assets/samplesheet.csv) has been provided with the p
 
 The typical command for running the pipeline is as follows:
 
-```console
+```bash
 nextflow run nf-core/hic --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile docker
 ```
 
@@ -63,9 +63,9 @@ See below for more information about profiles.
 
 Note that the pipeline will create the following files in your working directory:
 
-```console
+```bash
 work                # Directory containing the nextflow working files
-<OUTIDR>            # Finished results in specified location (defined with --outdir)
+<OUTDIR>            # Finished results in specified location (defined with --outdir)
 .nextflow_log       # Log file from Nextflow
 # Other nextflow hidden files, eg. history of pipeline runs and old logs.
 ```
@@ -74,7 +74,7 @@ work                # Directory containing the nextflow working files
 
 When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
 
-```console
+```bash
 nextflow pull nf-core/hic
 ```
 
@@ -82,22 +82,9 @@ nextflow pull nf-core/hic
 
 It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
 
-First, go to the
-[nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find
-the latest version number - numeric only (eg. `1.3.1`).
-Then specify this when running the pipeline with `-r` (one hyphen)
-eg. `-r 1.3.1`.
-
-This version number will be logged in reports when you run the pipeline, so
-that you'll know what you used when you look back in the future.
+First, go to the [nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find the latest pipeline version - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. Of course, you can switch to another version by changing the number after the `-r` flag.
 
-### Automatic resubmission
-
-Each step in the pipeline has a default set of requirements for number of CPUs,
-memory and time. For most of the steps in the pipeline, if the job exits with
-an error code of `143` (exceeded requested resources) it will automatically
-resubmit with higher requests (2 x original, then 3 x original). If it still
-fails after three times then the pipeline is stopped.
+This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future, for example at the bottom of the MultiQC reports.
 
 ## Core Nextflow arguments
 
@@ -109,7 +96,7 @@ fails after three times then the pipeline is stopped.
 Use this parameter to choose a configuration profile. Profiles can give
 configuration presets for different compute environments.
 
-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io e.g [FastQC](https://quay.io/repository/biocontainers/fastqc) except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/).
+Several generic profiles are bundled with the pipeline which instruct it to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below.
 
 > We highly recommend the use of Docker or Singularity containers for full
 > pipeline reproducibility, however when this is not possible, Conda is also supported.
@@ -127,10 +114,11 @@ the order of arguments is important!
 They are loaded in sequence, so later profiles can overwrite
 earlier profiles.
 
-If `-profile` is not specified, the pipeline will run locally and
-expect all software to be
-installed and available on the `PATH`. This is _not_ recommended.
+If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines depending on the compute environment.
 
+- `test`
+  - A profile with a complete configuration for automated testing
+  - Includes links to test data so needs no other parameters
 - `docker`
   - A generic configuration profile to be used with [Docker](https://docker.com/)
 - `singularity`
@@ -143,9 +131,6 @@ installed and available on the `PATH`. This is _not_ recommended.
   - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
 - `conda`
   - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
-- `test`
-  - A profile with a complete configuration for automated testing
-  - Includes links to test data so needs no other parameters
 
 ### `-resume`
 
@@ -196,8 +181,14 @@ Work dir:
 Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run`
 ```
 
+#### For beginners
+
+As a first step to bypass this error, you can try to increase the amount of CPUs, memory, and time available to the whole pipeline via the parameters `--max_cpus`, `--max_memory`, and `--max_time`. Based on the error above, you have to increase the amount of memory. To find the default value of `--max_memory`, go to the [parameter documentation of rnaseq](https://nf-co.re/rnaseq/3.9/parameters) and scroll down to the `show hidden parameter` button; in this case the default is 128GB. You can then try to run your pipeline again with `--max_memory 200GB -resume` to skip all processes that were already calculated. If you cannot increase the resources of the complete pipeline, you can try to adapt the resources for a single process as described below.
+
+#### Advanced option on process level
+
 To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN).
-We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/software/star/align/main.nf`.
+We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/star/align/main.nf`.
 If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9).
 The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements.
 The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB.
@@ -216,7 +207,7 @@ process {
 >
 > If you get a warning suggesting that the process selector isn't recognised check that the process name has been specified correctly.
 
-### Updating containers
+### Updating containers (advanced users)
 
 The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release the nf-core/viralrecon everytime a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`.
 
@@ -266,6 +257,14 @@ If you have any questions or issues please send us a message on
 [Slack](https://nf-co.re/join/slack) on the
 [`#configs` channel](https://nfcore.slack.com/channels/configs).
 
+## Azure Resource Requests
+
+To be used with the `azurebatch` profile by specifying `-profile azurebatch`.
+We recommend providing a compute `params.vm_type` of `Standard_D16_v3` VMs by default, but these options can be changed if required.
+
+Note that the choice of VM size depends on your quota and the overall workload during the analysis.
+For a thorough list, please refer to the [Azure Sizes for virtual machines in Azure](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes).
+
 ## Running in the background
 
 Nextflow handles job submissions and supervises the running jobs.
@@ -287,7 +286,7 @@ large amount of memory.
 We recommend adding the following line to your environment to limit this
 (typically in `~/.bashrc` or `~/.bash_profile`):
 
-```console
+```bash
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
 
diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy
index b3d092f8090902661a13b951bb251af4645f8d80..33cd4f6e8df62276afa55ab4c00cd59900ea013e 100755
--- a/lib/NfcoreSchema.groovy
+++ b/lib/NfcoreSchema.groovy
@@ -46,7 +46,6 @@ class NfcoreSchema {
             'quiet',
             'syslog',
             'v',
-            'version',
 
             // Options for `nextflow run` command
             'ansi',
diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy
index 2fc0a9b9b61d85455653e2abbc91391f8b8606b0..25a0a74a645332b1cacfc99b6e5c9c4500c49423 100755
--- a/lib/NfcoreTemplate.groovy
+++ b/lib/NfcoreTemplate.groovy
@@ -32,6 +32,25 @@ class NfcoreTemplate {
         }
     }
 
+    //
+    // Generate version string
+    //
+    public static String version(workflow) {
+        String version_string = ""
+
+        if (workflow.manifest.version) {
+            def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : ''
+            version_string += "${prefix_v}${workflow.manifest.version}"
+        }
+
+        if (workflow.commitId) {
+            def git_shortsha = workflow.commitId.substring(0, 7)
+            version_string += "-g${git_shortsha}"
+        }
+
+        return version_string
+    }
+
     //
     // Construct and send completion email
     //
@@ -61,7 +80,7 @@ class NfcoreTemplate {
         misc_fields['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp
 
         def email_fields = [:]
-        email_fields['version']      = workflow.manifest.version
+        email_fields['version']      = NfcoreTemplate.version(workflow)
         email_fields['runName']      = workflow.runName
         email_fields['success']      = workflow.success
         email_fields['dateComplete'] = workflow.complete
@@ -145,6 +164,64 @@ class NfcoreTemplate {
         output_tf.withWriter { w -> w << email_txt }
     }
 
+    //
+    // Construct and send a notification to a web server as JSON
+    // e.g. Microsoft Teams and Slack
+    //
+    public static void IM_notification(workflow, params, summary_params, projectDir, log) {
+        def hook_url = params.hook_url
+
+        def summary = [:]
+        for (group in summary_params.keySet()) {
+            summary << summary_params[group]
+        }
+
+        def misc_fields = [:]
+        misc_fields['start']                                = workflow.start
+        misc_fields['complete']                             = workflow.complete
+        misc_fields['scriptfile']                           = workflow.scriptFile
+        misc_fields['scriptid']                             = workflow.scriptId
+        if (workflow.repository) misc_fields['repository']  = workflow.repository
+        if (workflow.commitId)   misc_fields['commitid']    = workflow.commitId
+        if (workflow.revision)   misc_fields['revision']    = workflow.revision
+        misc_fields['nxf_version']                          = workflow.nextflow.version
+        misc_fields['nxf_build']                            = workflow.nextflow.build
+        misc_fields['nxf_timestamp']                        = workflow.nextflow.timestamp
+
+        def msg_fields = [:]
+        msg_fields['version']      = NfcoreTemplate.version(workflow)
+        msg_fields['runName']      = workflow.runName
+        msg_fields['success']      = workflow.success
+        msg_fields['dateComplete'] = workflow.complete
+        msg_fields['duration']     = workflow.duration
+        msg_fields['exitStatus']   = workflow.exitStatus
+        msg_fields['errorMessage'] = (workflow.errorMessage ?: 'None')
+        msg_fields['errorReport']  = (workflow.errorReport ?: 'None')
+        msg_fields['commandLine']  = workflow.commandLine.replaceFirst(/ +--hook_url +[^ ]+/, "")
+        msg_fields['projectDir']   = workflow.projectDir
+        msg_fields['summary']      = summary << misc_fields
+
+        // Render the JSON template
+        def engine       = new groovy.text.GStringTemplateEngine()
+        // Different JSON depending on the service provider
+        // Defaults to "Adaptive Cards" (https://adaptivecards.io), except Slack which has its own format
+        def json_path     = hook_url.contains("hooks.slack.com") ? "slackreport.json" : "adaptivecard.json"
+        def hf            = new File("$projectDir/assets/${json_path}")
+        def json_template = engine.createTemplate(hf).make(msg_fields)
+        def json_message  = json_template.toString()
+
+        // POST
+        def post = new URL(hook_url).openConnection();
+        post.setRequestMethod("POST")
+        post.setDoOutput(true)
+        post.setRequestProperty("Content-Type", "application/json")
+        post.getOutputStream().write(json_message.getBytes("UTF-8"));
+        def postRC = post.getResponseCode();
+        if (! postRC.equals(200)) {
+            log.warn(post.getErrorStream().getText());
+        }
+    }
+
     //
     // Print pipeline summary on completion
     //
@@ -154,7 +231,7 @@ class NfcoreTemplate {
             if (workflow.stats.ignoredCount == 0) {
                 log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-"
             } else {
-                log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed successfully, but with errored process(es) ${colors.reset}-"
+                log.info "-${colors.purple}[$workflow.manifest.name]${colors.yellow} Pipeline completed successfully, but with errored process(es) ${colors.reset}-"
             }
         } else {
             log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-"
@@ -242,6 +319,7 @@ class NfcoreTemplate {
     //
     public static String logo(workflow, monochrome_logs) {
         Map colors = logColours(monochrome_logs)
+        String workflow_version = NfcoreTemplate.version(workflow)
         String.format(
             """\n
             ${dashedLine(monochrome_logs)}
@@ -250,7 +328,7 @@ class NfcoreTemplate {
             ${colors.blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${colors.yellow}}  {${colors.reset}
             ${colors.blue}  | \\| |       \\__, \\__/ |  \\ |___     ${colors.green}\\`-._,-`-,${colors.reset}
                                                     ${colors.green}`._,._,\'${colors.reset}
-            ${colors.purple}  ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset}
+            ${colors.purple}  ${workflow.manifest.name} ${workflow_version}${colors.reset}
             ${dashedLine(monochrome_logs)}
             """.stripIndent()
         )
diff --git a/lib/Utils.groovy b/lib/Utils.groovy
old mode 100755
new mode 100644
index 28567bd70d63ebdae1340a22458cf8bc5a6fecf2..8d030f4e844bb87ca93fbe4f905e53a833b40840
--- a/lib/Utils.groovy
+++ b/lib/Utils.groovy
@@ -21,19 +21,26 @@ class Utils {
         }
 
         // Check that all channels are present
-        def required_channels = ['conda-forge', 'bioconda', 'defaults']
-        def conda_check_failed = !required_channels.every { ch -> ch in channels }
+        // This channel list is ordered by required channel priority.
+        def required_channels_in_order = ['conda-forge', 'bioconda', 'defaults']
+        def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean
 
         // Check that they are in the right order
-        conda_check_failed |= !(channels.indexOf('conda-forge') < channels.indexOf('bioconda'))
-        conda_check_failed |= !(channels.indexOf('bioconda') < channels.indexOf('defaults'))
+        def channel_priority_violation = false
+        def n = required_channels_in_order.size()
+        for (int i = 0; i < n - 1; i++) {
+            channel_priority_violation |= !(channels.indexOf(required_channels_in_order[i]) < channels.indexOf(required_channels_in_order[i+1]))
+        }
 
-        if (conda_check_failed) {
+        if (channels_missing | channel_priority_violation) {
             log.warn "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" +
                 "  There is a problem with your Conda configuration!\n\n" +
                 "  You will need to set-up the conda-forge and bioconda channels correctly.\n" +
-                "  Please refer to https://bioconda.github.io/user/install.html#set-up-channels\n" +
-                "  NB: The order of the channels matters!\n" +
+                "  Please refer to https://bioconda.github.io/\n" +
+                "  The observed channel order is \n" +
+                "  ${channels}\n" +
+                "  but the following channel order is required:\n" +
+                "  ${required_channels_in_order}\n" +
                 "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
         }
     }
diff --git a/lib/WorkflowHic.groovy b/lib/WorkflowHic.groovy
index b4755f41108622089151cf40d381d6e40b8e032b..3dea732db2bb9c05d5002920297d8d727efea9d0 100755
--- a/lib/WorkflowHic.groovy
+++ b/lib/WorkflowHic.groovy
@@ -2,6 +2,8 @@
 // This file holds several functions specific to the workflow/hic.nf in the nf-core/hic pipeline
 //
 
+import groovy.text.SimpleTemplateEngine
+
 class WorkflowHic {
 
     //
@@ -51,7 +53,22 @@ class WorkflowHic {
         return yaml_file_text
     }
 
-    //
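+    //
+    // Generate methods description text for MultiQC
+    //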
+    public static String methodsDescriptionText(run_workflow, mqc_methods_yaml) {
+        // Convert the Nextflow workflow object to a named map so that it can be used with the familiar NXF ${workflow} variable syntax in the MultiQC YML file
+        def meta = [:]
+        meta.workflow = run_workflow.toMap()
+        meta["manifest_map"] = run_workflow.manifest.toMap()
+
+        meta["doi_text"] = meta.manifest_map.doi ? "(doi: <a href=\'https://doi.org/${meta.manifest_map.doi}\'>${meta.manifest_map.doi}</a>)" : ""
+        meta["nodoi_text"] = meta.manifest_map.doi ? "": "<li>If available, make sure to update the text to include the Zenodo DOI of version of the pipeline used. </li>"
+
+        def methods_text = mqc_methods_yaml.text
+
+        def engine =  new SimpleTemplateEngine()
+        def description_html = engine.createTemplate(methods_text).make(meta)
+
+        return description_html
+    }
+
+    //
     // Exit pipeline if incorrect --genome key provided
     //
     private static void genomeExistsError(params, log) {
diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy
index fd70920b4cc9807708cac168f754d1ae661ab073..239d4fe7253d3a060bd99bff6b7842cdeb2891c8 100755
--- a/lib/WorkflowMain.groovy
+++ b/lib/WorkflowMain.groovy
@@ -18,7 +18,7 @@ class WorkflowMain {
     }
 
     //
-    // Print help to screen if required
+    // Generate help string
     //
     public static String help(workflow, params, log) {
         def command = "nextflow run ${workflow.manifest.name} --input samplesheet.csv --genome GRCh37 -profile docker"
@@ -31,7 +31,7 @@ class WorkflowMain {
     }
 
     //
-    // Print parameter summary log to screen
+    // Generate parameter summary log string
     //
     public static String paramsSummaryLog(workflow, params, log) {
         def summary_log = ''
@@ -52,19 +52,26 @@ class WorkflowMain {
             System.exit(0)
         }
 
-        // Validate workflow parameters via the JSON schema
-        if (params.validate_params) {
-            NfcoreSchema.validateParameters(workflow, params, log)
+        // Print workflow version and exit on --version
+        if (params.version) {
+            String workflow_version = NfcoreTemplate.version(workflow)
+            log.info "${workflow.manifest.name} ${workflow_version}"
+            System.exit(0)
         }
 
         // Print parameter summary log to screen
         log.info paramsSummaryLog(workflow, params, log)
 
+        // Validate workflow parameters via the JSON schema
+        if (params.validate_params) {
+            NfcoreSchema.validateParameters(workflow, params, log)
+        }
+
         // Check that a -profile or Nextflow config has been provided to run the pipeline
         NfcoreTemplate.checkConfigProvided(workflow, log)
 
         // Check that conda channels are set-up correctly
-        if (params.enable_conda) {
+        if (workflow.profile.tokenize(',').intersect(['conda', 'mamba']).size() >= 1) {
             Utils.checkCondaChannels(log)
         }
 
@@ -77,17 +84,15 @@ class WorkflowMain {
             System.exit(1)
         }
     }
-
     //
     // Get attribute from genome config file e.g. fasta
     //
-    public static String getGenomeAttribute(params, attribute) {
-        def val = ''
+    public static Object getGenomeAttribute(params, attribute) {
         if (params.genomes && params.genome && params.genomes.containsKey(params.genome)) {
             if (params.genomes[ params.genome ].containsKey(attribute)) {
-                val = params.genomes[ params.genome ][ attribute ]
+                return params.genomes[ params.genome ][ attribute ]
             }
         }
-        return val
+        return null
     }
 }
diff --git a/main.nf b/main.nf
index 82aaf0f200b77db6710472f821c715376d2e5668..dd564aa4e80652c1a21ba504c0564faddfb74bcd 100644
--- a/main.nf
+++ b/main.nf
@@ -4,6 +4,7 @@
     nf-core/hic
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Github : https://github.com/nf-core/hic
+
     Website: https://nf-co.re/hic
     Slack  : https://nfcore.slack.com/channels/hic
 ----------------------------------------------------------------------------------------
diff --git a/modules.json b/modules.json
index 4a090fd6e6f1a31b03ba0a8e2fbe9ea1f2633747..45540ca9ea0c4c8493525e58d8671176087348dc 100644
--- a/modules.json
+++ b/modules.json
@@ -2,24 +2,63 @@
     "name": "nf-core/hic",
     "homePage": "https://github.com/nf-core/hic",
     "repos": {
-        "nf-core/modules": {
-            "bowtie2/align": {
-                "git_sha": "61f68913fefc20241ceccb671b104230b2d775d7"
+        "https://github.com/nf-core/modules.git": {
+            "modules": {
+                "nf-core": {
+                    "bowtie2/align": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "bowtie2/build": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/balance": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/cload": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/dump": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/makebins": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/zoomify": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "custom/dumpsoftwareversions": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "custom/getchromsizes": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    },
+                    "fastqc": {
+                        "branch": "master",
+                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "installed_by": ["modules"]
+                    }
+                }
             },
-            "bowtie2/build": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
-            },
-            "cooler/zoomify": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
-            },
-            "custom/dumpsoftwareversions": {
-                "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d"
-            },
-            "custom/getchromsizes": {
-                "git_sha": "213403187932dbbdd936a04474cc8cd8abae7a08"
-            },
-            "fastqc": {
-                "git_sha": "49b18b1639f4f7104187058866a8fab33332bdfe"
+            "subworkflows": {
+                "nf-core": {}
             }
         }
     }
diff --git a/modules/local/cooltools/eigs-cis.nf b/modules/local/cooltools/eigscis.nf
similarity index 89%
rename from modules/local/cooltools/eigs-cis.nf
rename to modules/local/cooltools/eigscis.nf
index 55d6d1697f80bdd893c95349b827861b05f5bf39..feaa30007099cae34a18eaa8105a3367e260e1b6 100644
--- a/modules/local/cooltools/eigs-cis.nf
+++ b/modules/local/cooltools/eigscis.nf
@@ -2,10 +2,11 @@
  * cooltools - call_compartments
  */
 
-process CALL_COMPARTMENTS {
+process COOLTOOLS_EIGSCIS {
+    tag "${meta.id}"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::cooltools=0.5.1 bioconda::ucsc-bedgraphtobigwig=377" : null)
+    conda "bioconda::cooltools=0.5.1 bioconda::ucsc-bedgraphtobigwig=377"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c81d8d6b6acf4714ffaae1a274527a41958443f6:cc7ea58b8cefc76bed985dcfe261cb276ed9e0cf-0' :
         'quay.io/biocontainers/mulled-v2-c81d8d6b6acf4714ffaae1a274527a41958443f6:cc7ea58b8cefc76bed985dcfe261cb276ed9e0cf-0' }"
diff --git a/modules/local/cooltools/insulation.nf b/modules/local/cooltools/insulation.nf
index d6fd194810163729974567bb0c3bdfc2182dd607..8a9127ea47824f9541c4faf2263d8e61030468c3 100644
--- a/modules/local/cooltools/insulation.nf
+++ b/modules/local/cooltools/insulation.nf
@@ -2,10 +2,11 @@
  * Cooltools - diamond-insulation
  */
 
-process INSULATION {
+process COOLTOOLS_INSULATION {
+    tag "${meta.id}"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::cooltools=0.5.1" : null)
+    conda "bioconda::cooltools=0.5.1"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/cooltools:0.5.1--py37h37892f8_0' :
         'quay.io/biocontainers/cooltools:0.5.1--py37h37892f8_0' }"
@@ -14,7 +15,7 @@ process INSULATION {
     tuple val(meta), path(cool)
 
     output:
-    path("*tsv"), emit:results
+    path("*tsv"), emit:tsv
     path("versions.yml"), emit:versions
 
     script:
diff --git a/modules/local/hicexplorer/hicFindTADs.nf b/modules/local/hicexplorer/hicFindTADs.nf
index 6946e634f4b0490c4be8cdaa9b2d465e54777bdd..b6cae335c22111407b3d782e95a067ed087645e0 100644
--- a/modules/local/hicexplorer/hicFindTADs.nf
+++ b/modules/local/hicexplorer/hicFindTADs.nf
@@ -5,7 +5,7 @@
 process HIC_FIND_TADS {
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::hicexplorer=3.7.2" : null)
+    conda "bioconda::hicexplorer=3.7.2"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/hicexplorer:3.7.2--pyhdfd78af_1' :
         'quay.io/biocontainers/hicexplorer:3.7.2--pyhdfd78af_1' }"
diff --git a/modules/local/hicexplorer/hicPlotDistVsCounts.nf b/modules/local/hicexplorer/hicPlotDistVsCounts.nf
index 4c7d05f40a4d5cfb486b6ea3748f668d2e88398a..3e37d3fe7b466dbe45b2f19c220e2f9a65a07a9a 100644
--- a/modules/local/hicexplorer/hicPlotDistVsCounts.nf
+++ b/modules/local/hicexplorer/hicPlotDistVsCounts.nf
@@ -1,11 +1,12 @@
 /*
- * hicexplorer - hicPlotDistVsCounts
+ * hicexplorer - Genomic distance/counts plots
  */
 
 process HIC_PLOT_DIST_VS_COUNTS {
+    tag "${meta.id}"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::hicexplorer=3.7.2" : null)
+    conda "bioconda::hicexplorer=3.7.2"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/hicexplorer:3.7.2--pyhdfd78af_1' :
         'quay.io/biocontainers/hicexplorer:3.7.2--pyhdfd78af_1' }"
diff --git a/modules/local/hicpro/bowtie2_merge.nf b/modules/local/hicpro/bowtie2_merge.nf
index ed200d6a3eee66eccce53a0e1f15dc04eacdc2c9..7a4e96be9aaf2470a10ca19672591e0f84853602 100644
--- a/modules/local/hicpro/bowtie2_merge.nf
+++ b/modules/local/hicpro/bowtie2_merge.nf
@@ -1,8 +1,8 @@
 process MERGE_BOWTIE2{
-    tag "$prefix"
+    tag "${meta.id}"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
+    conda "bioconda::samtools=1.15.1"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
         'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
diff --git a/modules/local/hicpro/build_contact_maps.nf b/modules/local/hicpro/build_contact_maps.nf
index 097ff59a8f51465b4867146a400211664d24067c..fd633a984f26b4cccc901a92e983cf0acfaa8eed 100644
--- a/modules/local/hicpro/build_contact_maps.nf
+++ b/modules/local/hicpro/build_contact_maps.nf
@@ -1,26 +1,27 @@
 process BUILD_CONTACT_MAPS{
-  tag "$meta.id - $res"
+  tag "${meta.id}"
   label 'process_high_memory'
 
-  conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
+  conda "conda-forge::sed=4.7"
   container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
       'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
       'ubuntu:20.04' }"
 
   input:
-  tuple val(meta), path(vpairs), val(res) 
-  path chrsize 
+  tuple val(meta), path(vpairs), val(resolution) 
+  tuple val(meta2), path(chrsize) 
 
   output:
-  tuple val(meta), val(res), path("*.matrix"), path("*.bed"), emit: maps
+  tuple val(meta), val(resolution), path("*.matrix"), path("*.bed"), emit: maps
    
   script:
+  def prefix = task.ext.prefix ?: "${meta.id}"
   """
   build_matrix \\
     --matrix-format upper  \\
-    --binsize ${res} \\
+    --binsize ${resolution} \\
     --chrsizes ${chrsize} \\
     --ifile ${vpairs} \\
-    --oprefix ${meta.id}_${res}
+    --oprefix ${prefix}
   """
 }
diff --git a/modules/local/hicpro/combine_mates.nf b/modules/local/hicpro/combine_mates.nf
index 83b18dc17fd76bc737bf7749a5d28953309c0a6c..da95be819a03560d75db2bc80ab3e8b72bf774aa 100644
--- a/modules/local/hicpro/combine_mates.nf
+++ b/modules/local/hicpro/combine_mates.nf
@@ -1,8 +1,8 @@
 process COMBINE_MATES {
-    tag "$prefix"
+    tag "${meta.id}"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9  bioconda::pysam=0.19.0" : null)
+    conda "conda-forge::python=3.9  bioconda::pysam=0.19.0"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
diff --git a/modules/local/hicpro/dnase_mapping_stats.nf b/modules/local/hicpro/dnase_mapping_stats.nf
index 950529822dcb16df817185fcfd5e31bdd79df602..64cb0792e4d93dfcc1b1e7d0b5d50bf7e0b75c73 100644
--- a/modules/local/hicpro/dnase_mapping_stats.nf
+++ b/modules/local/hicpro/dnase_mapping_stats.nf
@@ -2,7 +2,7 @@ process MAPPING_STATS_DNASE {
     tag "$sample = $bam"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
+    conda "bioconda::samtools=1.15.1"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
         'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
diff --git a/modules/local/hicpro/get_restriction_fragments.nf b/modules/local/hicpro/get_restriction_fragments.nf
index 055db6a3045dee93da371bdc2e2bbb0f8df65cae..affc2d986a339d239602c6e66ad6f0d50517505e 100644
--- a/modules/local/hicpro/get_restriction_fragments.nf
+++ b/modules/local/hicpro/get_restriction_fragments.nf
@@ -2,17 +2,17 @@ process GET_RESTRICTION_FRAGMENTS {
     tag "$res_site"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9 conda-forge::numpy=1.22.3" : null)
+    conda "conda-forge::python=3.9 conda-forge::numpy=1.22.3"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
 
     input:
-    path fasta
+    tuple val(meta), path(fasta)
     val(res_site)
 
     output:
-    path "*.bed", emit: results
+    tuple val(meta), path("*.bed"), emit: results
     path("versions.yml"), emit: versions
 
     script:
diff --git a/modules/local/hicpro/get_valid_interaction.nf b/modules/local/hicpro/get_valid_interaction.nf
index 2709cdee3530c26820708ef9fd341c424ea14b9a..cc148371e52e04e96ca8dc9461a46ce80216b85e 100644
--- a/modules/local/hicpro/get_valid_interaction.nf
+++ b/modules/local/hicpro/get_valid_interaction.nf
@@ -2,14 +2,14 @@ process GET_VALID_INTERACTION {
     tag "$meta.id"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9  bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13" : null)
+    conda "conda-forge::python=3.9  bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
 
     input:
     tuple val(meta), path(bam)
-    path(resfrag)
+    tuple val(meta2), path(resfrag)
 
     output:
     tuple val(meta), path("*.validPairs"), emit:valid_pairs
diff --git a/modules/local/hicpro/get_valid_interaction_dnase.nf b/modules/local/hicpro/get_valid_interaction_dnase.nf
index 08a019f203653b25eaad601026c368e2e283d50c..0cb5883bb5f6fe072f15f439a8e34d9b002ed7f0 100644
--- a/modules/local/hicpro/get_valid_interaction_dnase.nf
+++ b/modules/local/hicpro/get_valid_interaction_dnase.nf
@@ -2,7 +2,7 @@ process GET_VALID_INTERACTION_DNASE {
     tag "$meta.id"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9  bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13" : null)
+    conda "conda-forge::python=3.9  bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
diff --git a/modules/local/hicpro/hicpro2pairs.nf b/modules/local/hicpro/hicpro2pairs.nf
index aa475c23e150be45e0166ee21103750e5c0f66d6..2ba2a901b62791e733900b24da4f244cd2fbaba7 100644
--- a/modules/local/hicpro/hicpro2pairs.nf
+++ b/modules/local/hicpro/hicpro2pairs.nf
@@ -2,14 +2,14 @@ process HICPRO2PAIRS {
     tag "$meta.id"
     label 'process_medium'
 
-    conda (params.enable_conda ? "bioconda::pairix=0.3.7" : null)
+    conda "bioconda::pairix=0.3.7"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/pairix:0.3.7--py36h30a8e3e_3' :
         'quay.io/biocontainers/pairix:0.3.7--py36h30a8e3e_3' }"
 
     input:
     tuple val(meta), path(vpairs)
-    path chrsize
+    tuple val(meta2), path(chrsize)
 
     output:
     tuple val(meta), path("*.pairs.gz"), path("*.pairs.gz.px2"), emit: pairs
diff --git a/modules/local/hicpro/merge_stats.nf b/modules/local/hicpro/merge_stats.nf
index 743f3740f4a630a506c510cd4879c50844f51b7f..b25dc8a13d2f18bc5ae05742c5f15d77fd19627d 100644
--- a/modules/local/hicpro/merge_stats.nf
+++ b/modules/local/hicpro/merge_stats.nf
@@ -1,7 +1,8 @@
 process MERGE_STATS {
+    tag "${meta.id}"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9" : null)
+    conda "conda-forge::python=3.9"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
diff --git a/modules/local/hicpro/merge_valid_interaction.nf b/modules/local/hicpro/merge_valid_interaction.nf
index c5b716d4493aad83d6339dffd85e287cfd317104..29e568a20997c0a313a87cf964f1f8c10327228b 100644
--- a/modules/local/hicpro/merge_valid_interaction.nf
+++ b/modules/local/hicpro/merge_valid_interaction.nf
@@ -2,7 +2,7 @@ process MERGE_VALID_INTERACTION {
     tag "$prefix"
     label 'process_high_memory'
 
-    conda (params.enable_conda ? "conda-forge::gawk=5.1.0" : null)
+    conda "conda-forge::gawk=5.1.0"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
         'ubuntu:20.04' }"
diff --git a/modules/local/hicpro/run_ice.nf b/modules/local/hicpro/run_ice.nf
index 996baf1e117dffa39ddc4a3c9769075771dab359..0a435edcd0f2a82c93be2a1b9bd832a721c270cf 100644
--- a/modules/local/hicpro/run_ice.nf
+++ b/modules/local/hicpro/run_ice.nf
@@ -1,8 +1,8 @@
 process ICE_NORMALIZATION{
-    tag "$rmaps"
+    tag "$meta.id"
     label 'process_high_memory'
 
-    conda (params.enable_conda ? "conda-forge::python=3.9  bioconda::iced=0.5.10 conda-forge::numpy=1.22.3" : null)
+    conda "conda-forge::python=3.9 bioconda::iced=0.5.10 conda-forge::numpy=1.22.3"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
         'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
diff --git a/modules/local/hicpro/trim_reads.nf b/modules/local/hicpro/trim_reads.nf
index 1585591f702ec578a91ffe05ade933a8fedf04da..5a96df412ea89845583fc363eccf2df49ba9458d 100644
--- a/modules/local/hicpro/trim_reads.nf
+++ b/modules/local/hicpro/trim_reads.nf
@@ -2,7 +2,7 @@ process TRIM_READS {
     tag "$meta.id"
     label 'process_low'
 
-    conda (params.enable_conda ? "conda-forge::sed=4.7" : null)
+    conda "conda-forge::sed=4.7"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
         'ubuntu:20.04' }"
diff --git a/modules/local/multiqc.nf b/modules/local/multiqc.nf
index 8fa76a16f250b1c5e2f1394508985b4a0b3ab2ab..595dc94d0eebd849d217c25349e4bb253bee64b6 100644
--- a/modules/local/multiqc.nf
+++ b/modules/local/multiqc.nf
@@ -1,10 +1,10 @@
 process MULTIQC {
     label 'process_medium'
 
-    conda (params.enable_conda ? 'bioconda::multiqc=1.12' : null)
+    conda "bioconda::multiqc=1.13"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/multiqc:1.12--pyhdfd78af_0' :
-        'quay.io/biocontainers/multiqc:1.12--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
+        'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"
 
     input:
     path multiqc_config
diff --git a/modules/local/samplesheet_check.nf b/modules/local/samplesheet_check.nf
index 31f62fa99c98fdf3938dd5a360c5d4e522479704..1624dfafe6425f07bbd15daf3b2735ea1187c0ec 100644
--- a/modules/local/samplesheet_check.nf
+++ b/modules/local/samplesheet_check.nf
@@ -1,7 +1,8 @@
 process SAMPLESHEET_CHECK {
     tag "$samplesheet"
+    label 'process_single'
 
-    conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
+    conda "conda-forge::python=3.8.3"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/python:3.8.3' :
         'quay.io/biocontainers/python:3.8.3' }"
@@ -13,6 +14,9 @@ process SAMPLESHEET_CHECK {
     path '*.csv'       , emit: csv
     path "versions.yml", emit: versions
 
+    when:
+    task.ext.when == null || task.ext.when
+
     script: // This script is bundled with the pipeline, in nf-core/hic/bin/
     """
     check_samplesheet.py \\
diff --git a/modules/local/split_cooler_dump.nf b/modules/local/split_cooler_dump.nf
index 38e7357e2222b4216fa346fbf12fad3004b1e209..8b9e45b31d304545ea62946f4c4e0eacb7860002 100644
--- a/modules/local/split_cooler_dump.nf
+++ b/modules/local/split_cooler_dump.nf
@@ -2,12 +2,20 @@ process SPLIT_COOLER_DUMP {
     tag "$meta.id"
     label 'process_low'
 
+    conda "conda-forge::gawk=5.1.0"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'ubuntu:20.04' }"
+
     input:
     tuple val(meta), path(bedpe)
 
     output:
-    path "*.txt", emit: matrix
-    path "versions.yml", emit: versions
+    tuple val(meta), path("*.txt"), emit: matrix
+    path ("versions.yml"), emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
 
     script:
     def args = task.ext.args ?: ''
diff --git a/modules/nf-core/bowtie2/align/main.nf b/modules/nf-core/bowtie2/align/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..3d851866a30b4f629881864516f0789f6cae9443
--- /dev/null
+++ b/modules/nf-core/bowtie2/align/main.nf
@@ -0,0 +1,71 @@
+process BOWTIE2_ALIGN {
+    tag "$meta.id"
+    label "process_high"
+
+    conda "bioconda::bowtie2=2.4.4 bioconda::samtools=1.16.1 conda-forge::pigz=2.6"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0' :
+        'quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0' }"
+
+    input:
+    tuple val(meta) , path(reads)
+    tuple val(meta2), path(index)
+    val   save_unaligned
+    val   sort_bam
+
+    output:
+    tuple val(meta), path("*.bam")    , emit: bam
+    tuple val(meta), path("*.log")    , emit: log
+    tuple val(meta), path("*fastq.gz"), emit: fastq, optional:true
+    path  "versions.yml"              , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ""
+    def args2 = task.ext.args2 ?: ""
+    def prefix = task.ext.prefix ?: "${meta.id}"
+
+    def unaligned = ""
+    def reads_args = ""
+    if (meta.single_end) {
+        unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ""
+        reads_args = "-U ${reads}"
+    } else {
+        unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ""
+        reads_args = "-1 ${reads[0]} -2 ${reads[1]}"
+    }
+
+    def samtools_command = sort_bam ? 'sort' : 'view'
+
+    """
+    INDEX=`find -L ./ -name "*.rev.1.bt2" | sed "s/\\.rev.1.bt2\$//"`
+    [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed "s/\\.rev.1.bt2l\$//"`
+    [ -z "\$INDEX" ] && echo "Bowtie2 index files not found" 1>&2 && exit 1
+
+    bowtie2 \\
+        -x \$INDEX \\
+        $reads_args \\
+        --threads $task.cpus \\
+        $unaligned \\
+        $args \\
+        2> ${prefix}.bowtie2.log \\
+        | samtools $samtools_command $args2 --threads $task.cpus -o ${prefix}.bam -
+
+    if [ -f ${prefix}.unmapped.fastq.1.gz ]; then
+        mv ${prefix}.unmapped.fastq.1.gz ${prefix}.unmapped_1.fastq.gz
+    fi
+
+    if [ -f ${prefix}.unmapped.fastq.2.gz ]; then
+        mv ${prefix}.unmapped.fastq.2.gz ${prefix}.unmapped_2.fastq.gz
+    fi
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
+        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+        pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/modules/bowtie2/align/meta.yml b/modules/nf-core/bowtie2/align/meta.yml
similarity index 74%
rename from modules/nf-core/modules/bowtie2/align/meta.yml
rename to modules/nf-core/bowtie2/align/meta.yml
index f80421eca42c51c7238770d44466f9de978e01ae..c8e9a001290d0feddaa121424a5a9c65ae568396 100644
--- a/modules/nf-core/modules/bowtie2/align/meta.yml
+++ b/modules/nf-core/bowtie2/align/meta.yml
@@ -2,7 +2,9 @@ name: bowtie2_align
 description: Align reads to a reference genome using bowtie2
 keywords:
   - align
+  - map
   - fasta
+  - fastq
   - genome
   - reference
 tools:
@@ -25,10 +27,24 @@ input:
       description: |
         List of input FastQ files of size 1 and 2 for single-end and paired-end data,
         respectively.
+  - meta2:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
   - index:
       type: file
       description: Bowtie2 genome index files
       pattern: "*.ebwt"
+  - save_unaligned:
+      type: boolean
+      description: |
+        Save reads that do not map to the reference (true) or discard them (false)
+        (default: false)
+  - sort_bam:
+      type: boolean
+      description: use samtools sort (true) or samtools view (false)
+      pattern: "true or false"
 output:
   - bam:
       type: file
diff --git a/modules/nf-core/modules/bowtie2/build/main.nf b/modules/nf-core/bowtie2/build/main.nf
similarity index 80%
rename from modules/nf-core/modules/bowtie2/build/main.nf
rename to modules/nf-core/bowtie2/build/main.nf
index a4da62d073b5353cfdd6bff727cc12a86692706b..551893af3b3bb1a3955f1cc7c0b50735c6fded8d 100644
--- a/modules/nf-core/modules/bowtie2/build/main.nf
+++ b/modules/nf-core/bowtie2/build/main.nf
@@ -2,17 +2,17 @@ process BOWTIE2_BUILD {
     tag "$fasta"
     label 'process_high'
 
-    conda (params.enable_conda ? 'bioconda::bowtie2=2.4.4' : null)
+    conda "bioconda::bowtie2=2.4.4"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/bowtie2:2.4.4--py39hbb4e92a_0' :
         'quay.io/biocontainers/bowtie2:2.4.4--py39hbb4e92a_0' }"
 
     input:
-    path fasta
+    tuple val(meta), path(fasta)
 
     output:
-    path 'bowtie2'      , emit: index
-    path "versions.yml" , emit: versions
+    tuple val(meta), path('bowtie2')    , emit: index
+    path "versions.yml"                 , emit: versions
 
     when:
     task.ext.when == null || task.ext.when
diff --git a/modules/nf-core/modules/bowtie2/build/meta.yml b/modules/nf-core/bowtie2/build/meta.yml
similarity index 74%
rename from modules/nf-core/modules/bowtie2/build/meta.yml
rename to modules/nf-core/bowtie2/build/meta.yml
index 2da9a217163fae0e1b392af5ce6473c62e074c25..0240224d532af842d762c5c947fff6e84e5be113 100644
--- a/modules/nf-core/modules/bowtie2/build/meta.yml
+++ b/modules/nf-core/bowtie2/build/meta.yml
@@ -16,10 +16,20 @@ tools:
       doi: 10.1038/nmeth.1923
       licence: ["GPL-3.0-or-later"]
 input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
   - fasta:
       type: file
       description: Input genome fasta file
 output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
   - index:
       type: file
       description: Bowtie2 genome index files
diff --git a/modules/local/cooler/balance.nf b/modules/nf-core/cooler/balance/main.nf
similarity index 69%
rename from modules/local/cooler/balance.nf
rename to modules/nf-core/cooler/balance/main.nf
index d03959a6ed764e79af2377ad0c501f1bc1b92b5b..4173a3c1291df7e25e3529d9ab8f2b6f8d1240b7 100644
--- a/modules/local/cooler/balance.nf
+++ b/modules/nf-core/cooler/balance/main.nf
@@ -2,9 +2,9 @@ process COOLER_BALANCE {
     tag "$meta.id"
     label 'process_high'
 
-    conda (params.enable_conda ? "bioconda::cooler=0.8.11" : null)
+    conda "bioconda::cooler=0.8.11"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0':
         'quay.io/biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
 
     input:
@@ -14,18 +14,22 @@ process COOLER_BALANCE {
     tuple val(meta), path("${prefix}.${extension}"), emit: cool
     path "versions.yml"                            , emit: versions
 
+    when:
+    task.ext.when == null || task.ext.when
+
     script:
     def args = task.ext.args ?: ''
-    prefix = task.ext.prefix ?: "${cool.baseName}_balanced"
-    suffix = resolution ? "::$resolution" : ""
+    prefix = task.ext.prefix ?: "${meta.id}"
+    suffix = resolution ? "::/resolutions/$resolution" : ""
     extension = cool.getExtension()
+    if ("$cool" == "${prefix}.${extension}") error "Input and output names are the same, use \"task.ext.prefix\" to disambiguate!"
     """
+    cp ${cool} ${prefix}.${extension}
+
     cooler balance \\
         $args \\
         -p ${task.cpus} \\
-        ${cool}${suffix}
-
-    cp ${cool} ${prefix}.${extension}
+        ${prefix}.${extension}${suffix}
 
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
diff --git a/modules/nf-core/cooler/balance/meta.yml b/modules/nf-core/cooler/balance/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..af1a780002701ad6bbf7d64105e5e8153098f5ec
--- /dev/null
+++ b/modules/nf-core/cooler/balance/meta.yml
@@ -0,0 +1,45 @@
+name: "cooler_balance"
+description: Run matrix balancing on a cool file
+keywords:
+  - balance
+tools:
+  - "cooler":
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - cool:
+      type: file
+      description: Path to COOL file
+      pattern: "*.{cool,mcool}"
+  - resolution:
+      type: value
+      description: Resolution
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - cool:
+      type: file
+      description: Output COOL file with balancing weights
+      pattern: "*.cool"
+
+authors:
+  - "@nservant"
+  - "@muffato"
diff --git a/modules/local/cooler/cload.nf b/modules/nf-core/cooler/cload/main.nf
similarity index 91%
rename from modules/local/cooler/cload.nf
rename to modules/nf-core/cooler/cload/main.nf
index 2dd29b7234be38112205d3332f4126495030581d..80d61f07336f81233a6bd8ffb116e5a25e57048d 100644
--- a/modules/local/cooler/cload.nf
+++ b/modules/nf-core/cooler/cload/main.nf
@@ -2,7 +2,7 @@ process COOLER_CLOAD {
     tag "$meta.id"
     label 'process_high'
 
-    conda (params.enable_conda ? "bioconda::cooler=0.8.11" : null)
+    conda "bioconda::cooler=0.8.11"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
         'quay.io/biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
@@ -29,7 +29,7 @@ process COOLER_CLOAD {
         $nproc \\
         ${chromsizes}:${cool_bin} \\
         $pairs \\
-        ${prefix}.${cool_bin}.cool
+        ${prefix}.cool
 
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
diff --git a/modules/nf-core/cooler/cload/meta.yml b/modules/nf-core/cooler/cload/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8513aaec1ed8558f850367f453ee55eb9d378b11
--- /dev/null
+++ b/modules/nf-core/cooler/cload/meta.yml
@@ -0,0 +1,53 @@
+name: cooler_cload
+description: Create a cooler from genomic pairs and bins
+keywords:
+  - cool
+tools:
+  - cooler:
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - pairs:
+      type: file
+      description: Path to contacts (i.e. read pairs) file.
+  - index:
+      type: file
+      description: Path to index file of the contacts.
+  - cool_bin:
+      type: value
+      description: Bins size in bp
+  - chromsizes:
+      type: file
+      description: Path to a chromsizes file.
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - version:
+      type: file
+      description: File containing software version
+      pattern: "versions.yml"
+  - cool:
+      type: file
+      description: Output COOL file path
+      pattern: "*.cool"
+  - cool_bin:
+      type: value
+      description: Bins size in bp
+
+authors:
+  - "@jianhong"
+  - "@muffato"
diff --git a/modules/local/cooler/dump.nf b/modules/nf-core/cooler/dump/main.nf
similarity index 88%
rename from modules/local/cooler/dump.nf
rename to modules/nf-core/cooler/dump/main.nf
index 0c01c8dbeab356f7a277426d4fd1cad36d946b50..b46c78cf35ec6b40276e2d792635f97b9909ab55 100644
--- a/modules/local/cooler/dump.nf
+++ b/modules/nf-core/cooler/dump/main.nf
@@ -2,7 +2,7 @@ process COOLER_DUMP {
     tag "$meta.id"
     label 'process_high'
 
-    conda (params.enable_conda ? "bioconda::cooler=0.8.11" : null)
+    conda "bioconda::cooler=0.8.11"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
         'quay.io/biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
@@ -20,7 +20,7 @@ process COOLER_DUMP {
     script:
     def args = task.ext.args ?: ''
     def prefix = task.ext.prefix ?: "${meta.id}"
-    def suffix = resolution ? "::$resolution" : ""
+    def suffix = resolution ? "::/resolutions/$resolution" : ""
     """
     cooler dump \\
         $args \\
diff --git a/modules/nf-core/cooler/dump/meta.yml b/modules/nf-core/cooler/dump/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fe60523eb3556a6bb2187be0563c3fd6f2cbf5cf
--- /dev/null
+++ b/modules/nf-core/cooler/dump/meta.yml
@@ -0,0 +1,45 @@
+name: cooler_dump
+description: Dump a cooler’s data to a text stream.
+keywords:
+  - dump
+tools:
+  - cooler:
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - cool:
+      type: file
+      description: Path to COOL file
+      pattern: "*.{cool,mcool}"
+  - resolution:
+      type: value
+      description: Resolution
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - bedpe:
+      type: file
+      description: Output text file
+      pattern: "*.bedpe"
+
+authors:
+  - "@jianhong"
+  - "@muffato"
diff --git a/modules/local/cooler/makebins.nf b/modules/nf-core/cooler/makebins/main.nf
similarity index 57%
rename from modules/local/cooler/makebins.nf
rename to modules/nf-core/cooler/makebins/main.nf
index 09147d45109b01fe256f59fcf7fdafcfaad33960..7f0826197e99e9e79107359a8c65d7d60346dc49 100644
--- a/modules/local/cooler/makebins.nf
+++ b/modules/nf-core/cooler/makebins/main.nf
@@ -1,26 +1,30 @@
 process COOLER_MAKEBINS {
-    tag "${cool_bin}"
+    tag "${meta.id}}"
     label 'process_low'
 
-    conda (params.enable_conda ? "bioconda::cooler=0.8.11" : null)
+    conda "bioconda::cooler=0.8.11"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0':
         'quay.io/biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
 
     input:
-    tuple path(chromsizes), val(cool_bin)
+    tuple val(meta), path(chromsizes), val(cool_bin)
 
     output:
-    path ("*.bed")       , emit: bed
-    path ("versions.yml"), emit: versions
+    tuple val(meta), path("*.bed"), emit: bed
+    path "versions.yml"           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
 
     script:
-    def args = task.ext.args ?: ''
+    def args   = task.ext.args   ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
     """
     cooler makebins \\
         $args \\
         ${chromsizes} \\
-        ${cool_bin} > cooler_bins_${cool_bin}.bed
+        ${cool_bin} > ${prefix}.bed
 
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
diff --git a/modules/nf-core/cooler/makebins/meta.yml b/modules/nf-core/cooler/makebins/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..33fd8eb63f4a3225d3d03eb028578b2c1eeeaa5d
--- /dev/null
+++ b/modules/nf-core/cooler/makebins/meta.yml
@@ -0,0 +1,34 @@
+name: "cooler_makebins"
+description: Generate fixed-width genomic bins
+keywords:
+  - makebins
+tools:
+  - "cooler":
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - chromsize:
+      type: file
+      description: Path to chromosome size file
+  - cool_bin:
+      type: value
+      description: Resolution (bin size) in base pairs
+
+output:
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - bed:
+      type: file
+      description: Genome segmentation at a fixed resolution as a BED file.
+      pattern: "*.bed"
+
+authors:
+  - "@nservant"
+  - "@muffato"
diff --git a/modules/nf-core/modules/cooler/zoomify/main.nf b/modules/nf-core/cooler/zoomify/main.nf
similarity index 93%
rename from modules/nf-core/modules/cooler/zoomify/main.nf
rename to modules/nf-core/cooler/zoomify/main.nf
index 942282c0d6a8ab161502a375863185579bae9f47..f1cd8df79d02b8630c31de0620521cef0cdd6df0 100644
--- a/modules/nf-core/modules/cooler/zoomify/main.nf
+++ b/modules/nf-core/cooler/zoomify/main.nf
@@ -2,7 +2,7 @@ process COOLER_ZOOMIFY {
     tag "$meta.id"
     label 'process_high'
 
-    conda (params.enable_conda ? "bioconda::cooler=0.8.11" : null)
+    conda "bioconda::cooler=0.8.11"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
         'quay.io/biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
diff --git a/modules/nf-core/modules/cooler/zoomify/meta.yml b/modules/nf-core/cooler/zoomify/meta.yml
similarity index 93%
rename from modules/nf-core/modules/cooler/zoomify/meta.yml
rename to modules/nf-core/cooler/zoomify/meta.yml
index d9e12b0587d3500e9592402bd5788615cad9d17a..57f554861b25d4ca9edac66c73ff306a4d9f9390 100644
--- a/modules/nf-core/modules/cooler/zoomify/meta.yml
+++ b/modules/nf-core/cooler/zoomify/meta.yml
@@ -5,7 +5,7 @@ keywords:
 tools:
   - cooler:
       description: Sparse binary format for genomic interaction matrices
-      homepage: https://cooler.readthedocs.io/en/latest/index.html
+      homepage: https://open2c.github.io/cooler/
       documentation: https://cooler.readthedocs.io/en/latest/index.html
       tool_dev_url: https://github.com/open2c/cooler
       doi: "10.1093/bioinformatics/btz540"
diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf
similarity index 79%
rename from modules/nf-core/modules/custom/dumpsoftwareversions/main.nf
rename to modules/nf-core/custom/dumpsoftwareversions/main.nf
index 327d5100560d84eb0020b60acf0db2922497991b..3df21765b90921413962c3bb5ca44d117d829297 100644
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf
+++ b/modules/nf-core/custom/dumpsoftwareversions/main.nf
@@ -1,11 +1,11 @@
 process CUSTOM_DUMPSOFTWAREVERSIONS {
-    label 'process_low'
+    label 'process_single'
 
     // Requires `pyyaml` which does not have a dedicated container but is in the MultiQC container
-    conda (params.enable_conda ? "bioconda::multiqc=1.11" : null)
+    conda "bioconda::multiqc=1.13"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/multiqc:1.11--pyhdfd78af_0' :
-        'quay.io/biocontainers/multiqc:1.11--pyhdfd78af_0' }"
+        'https://depot.galaxyproject.org/singularity/multiqc:1.13--pyhdfd78af_0' :
+        'quay.io/biocontainers/multiqc:1.13--pyhdfd78af_0' }"
 
     input:
     path versions
diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/custom/dumpsoftwareversions/meta.yml
similarity index 100%
rename from modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml
rename to modules/nf-core/custom/dumpsoftwareversions/meta.yml
diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
new file mode 100755
index 0000000000000000000000000000000000000000..b83b32c4d4242764a439f4d51e0255711f9cd40f
--- /dev/null
+++ b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+
+"""Provide functions to merge multiple versions.yml files."""
+
+import platform
+from textwrap import dedent
+
+import yaml
+
+
+def _make_versions_html(versions):
+    """Generate a tabular HTML output of all versions for MultiQC."""
+    html = [
+        dedent(
+            """\\
+            <style>
+            #nf-core-versions tbody:nth-child(even) {
+                background-color: #f2f2f2;
+            }
+            </style>
+            <table class="table" style="width:100%" id="nf-core-versions">
+                <thead>
+                    <tr>
+                        <th> Process Name </th>
+                        <th> Software </th>
+                        <th> Version  </th>
+                    </tr>
+                </thead>
+            """
+        )
+    ]
+    for process, tmp_versions in sorted(versions.items()):
+        html.append("<tbody>")
+        for i, (tool, version) in enumerate(sorted(tmp_versions.items())):
+            html.append(
+                dedent(
+                    f"""\\
+                    <tr>
+                        <td><samp>{process if (i == 0) else ''}</samp></td>
+                        <td><samp>{tool}</samp></td>
+                        <td><samp>{version}</samp></td>
+                    </tr>
+                    """
+                )
+            )
+        html.append("</tbody>")
+    html.append("</table>")
+    return "\\n".join(html)
+
+
+def main():
+    """Load all version files and generate merged output."""
+    versions_this_module = {}
+    versions_this_module["${task.process}"] = {
+        "python": platform.python_version(),
+        "yaml": yaml.__version__,
+    }
+
+    with open("$versions") as f:
+        versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
+
+    # aggregate versions by the module name (derived from fully-qualified process name)
+    versions_by_module = {}
+    for process, process_versions in versions_by_process.items():
+        module = process.split(":")[-1]
+        try:
+            if versions_by_module[module] != process_versions:
+                raise AssertionError(
+                    "We assume that software versions are the same between all modules. "
+                    "If you see this error-message it means you discovered an edge-case "
+                    "and should open an issue in nf-core/tools. "
+                )
+        except KeyError:
+            versions_by_module[module] = process_versions
+
+    versions_by_module["Workflow"] = {
+        "Nextflow": "$workflow.nextflow.version",
+        "$workflow.manifest.name": "$workflow.manifest.version",
+    }
+
+    versions_mqc = {
+        "id": "software_versions",
+        "section_name": "${workflow.manifest.name} Software Versions",
+        "section_href": "https://github.com/${workflow.manifest.name}",
+        "plot_type": "html",
+        "description": "are collected at run time from the software output.",
+        "data": _make_versions_html(versions_by_module),
+    }
+
+    with open("software_versions.yml", "w") as f:
+        yaml.dump(versions_by_module, f, default_flow_style=False)
+    with open("software_versions_mqc.yml", "w") as f:
+        yaml.dump(versions_mqc, f, default_flow_style=False)
+
+    with open("versions.yml", "w") as f:
+        yaml.dump(versions_this_module, f, default_flow_style=False)
+
+
+if __name__ == "__main__":
+    main()
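The template above is executed by CUSTOM_DUMPSOFTWAREVERSIONS, which expects all per-module versions.yml files to be collated into a single input. A minimal sketch of the usual wiring on the pipeline side (the workflow and channel names here are illustrative, not taken from this diff):

    include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'

    workflow COLLECT_VERSIONS {
        main:
        // every module emits a versions.yml; mix them into one channel
        ch_versions = Channel.empty()
        // ch_versions = ch_versions.mix(FASTQC.out.versions)

        // deduplicate and collate into a single file before calling the module
        CUSTOM_DUMPSOFTWAREVERSIONS (
            ch_versions.unique().collectFile(name: 'collated_versions.yml')
        )
    }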
diff --git a/modules/nf-core/custom/getchromsizes/main.nf b/modules/nf-core/custom/getchromsizes/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..580f87feade7280bb8d520551aa61f0021b3b88d
--- /dev/null
+++ b/modules/nf-core/custom/getchromsizes/main.nf
@@ -0,0 +1,44 @@
+process CUSTOM_GETCHROMSIZES {
+    tag "$fasta"
+    label 'process_single'
+
+    conda "bioconda::samtools=1.16.1"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/samtools:1.16.1--h6899075_1' :
+        'quay.io/biocontainers/samtools:1.16.1--h6899075_1' }"
+
+    input:
+    tuple val(meta), path(fasta)
+
+    output:
+    tuple val(meta), path ("*.sizes"), emit: sizes
+    tuple val(meta), path ("*.fai")  , emit: fai
+    tuple val(meta), path ("*.gzi")  , emit: gzi, optional: true
+    path  "versions.yml"             , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    samtools faidx $fasta
+    cut -f 1,2 ${fasta}.fai > ${fasta}.sizes
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        getchromsizes: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+    END_VERSIONS
+    """
+
+    stub:
+    """
+    touch ${fasta}.fai
+    touch ${fasta}.sizes
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        getchromsizes: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+    END_VERSIONS
+    """
+}
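Unlike the deleted copy further down, this version of the module takes and emits a (meta, file) tuple. A minimal sketch of a call site, assuming params.fasta points to a genome FASTA (the id value is illustrative):

    // Wrap the FASTA in a [ meta, file ] tuple before calling the module.
    ch_fasta = Channel.fromPath( params.fasta, checkIfExists: true )
                      .map { fasta -> [ [ id: fasta.baseName ], fasta ] }

    CUSTOM_GETCHROMSIZES ( ch_fasta )

    // the sizes and fai outputs keep the same meta map as the input FASTA
    CUSTOM_GETCHROMSIZES.out.sizes.view { meta, sizes -> "${meta.id}: ${sizes}" }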
diff --git a/modules/nf-core/modules/custom/getchromsizes/meta.yml b/modules/nf-core/custom/getchromsizes/meta.yml
similarity index 62%
rename from modules/nf-core/modules/custom/getchromsizes/meta.yml
rename to modules/nf-core/custom/getchromsizes/meta.yml
index ee6c257185a41746801ae1cccaaef1d31379ddd7..219ca1d8e07166c23e28089ae5067b5937cc6f8d 100644
--- a/modules/nf-core/modules/custom/getchromsizes/meta.yml
+++ b/modules/nf-core/custom/getchromsizes/meta.yml
@@ -14,12 +14,22 @@ tools:
       licence: ["MIT"]
 
 input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
   - fasta:
       type: file
       description: FASTA file
-      pattern: "*.{fasta}"
+      pattern: "*.{fa,fasta,fna,fas}"
 
 output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
   - sizes:
       type: file
       description: File containing chromosome lengths
@@ -28,11 +38,16 @@ output:
       type: file
       description: FASTA index file
       pattern: "*.{fai}"
+  - gzi:
+      type: file
+      description: Optional gzip index file for compressed inputs
+      pattern: "*.gzi"
   - versions:
       type: file
-      description: File containing software version
+      description: File containing software versions
       pattern: "versions.yml"
 
 authors:
   - "@tamara-hodgetts"
   - "@chris-cheshire"
+  - "@muffato"
diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..9ae5838158b28d2ae49270133fbbfe0ea673e991
--- /dev/null
+++ b/modules/nf-core/fastqc/main.nf
@@ -0,0 +1,51 @@
+process FASTQC {
+    tag "$meta.id"
+    label 'process_medium'
+
+    conda "bioconda::fastqc=0.11.9"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
+        'quay.io/biocontainers/fastqc:0.11.9--0' }"
+
+    input:
+    tuple val(meta), path(reads)
+
+    output:
+    tuple val(meta), path("*.html"), emit: html
+    tuple val(meta), path("*.zip") , emit: zip
+    path  "versions.yml"           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    // Make list of old name and new name pairs to use for renaming in the bash while loop
+    def old_new_pairs = reads instanceof Path || reads.size() == 1 ? [[ reads, "${prefix}.${reads.extension}" ]] : reads.withIndex().collect { entry, index -> [ entry, "${prefix}_${index + 1}.${entry.extension}" ] }
+    def rename_to = old_new_pairs*.join(' ').join(' ')
+    def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ')
+    """
+    printf "%s %s\\n" $rename_to | while read old_name new_name; do
+        [ -f "\${new_name}" ] || ln -s \$old_name \$new_name
+    done
+    fastqc $args --threads $task.cpus $renamed_files
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
+    END_VERSIONS
+    """
+
+    stub:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    touch ${prefix}.html
+    touch ${prefix}.zip
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/modules/fastqc/meta.yml b/modules/nf-core/fastqc/meta.yml
similarity index 100%
rename from modules/nf-core/modules/fastqc/meta.yml
rename to modules/nf-core/fastqc/meta.yml
diff --git a/modules/nf-core/modules/bowtie2/align/main.nf b/modules/nf-core/modules/bowtie2/align/main.nf
deleted file mode 100644
index c233f955d10b8812541b80725939b4d1768e0d32..0000000000000000000000000000000000000000
--- a/modules/nf-core/modules/bowtie2/align/main.nf
+++ /dev/null
@@ -1,81 +0,0 @@
-process BOWTIE2_ALIGN {
-    tag "$meta.id"
-    label 'process_high'
-
-    conda (params.enable_conda ? 'bioconda::bowtie2=2.4.4 bioconda::samtools=1.15.1 conda-forge::pigz=2.6' : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0' :
-        'quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:1744f68fe955578c63054b55309e05b41c37a80d-0' }"
-
-    input:
-    tuple val(meta), path(reads)
-    path  index
-    val   save_unaligned
-
-    output:
-    tuple val(meta), path('*.bam')    , emit: bam
-    tuple val(meta), path('*.log')    , emit: log
-    tuple val(meta), path('*fastq.gz'), emit: fastq, optional:true
-    path  "versions.yml"              , emit: versions
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script:
-    def args = task.ext.args ?: ''
-    def args2 = task.ext.args2 ?: ''
-    def prefix = task.ext.prefix ?: "${meta.id}"
-    if (meta.single_end) {
-        def unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ''
-        """
-        INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
-        [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
-        [ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
-        bowtie2 \\
-            -x \$INDEX \\
-            -U $reads \\
-            --threads $task.cpus \\
-            $unaligned \\
-            $args \\
-            2> ${prefix}.bowtie2.log \\
-            | samtools view -@ $task.cpus $args2 -bhS -o ${prefix}.bam -
-
-        cat <<-END_VERSIONS > versions.yml
-        "${task.process}":
-            bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
-            samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
-            pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
-        END_VERSIONS
-        """
-    } else {
-        def unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ''
-        """
-        INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
-        [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed 's/.rev.1.bt2l//'`
-        [ -z "\$INDEX" ] && echo "BT2 index files not found" 1>&2 && exit 1
-        bowtie2 \\
-            -x \$INDEX \\
-            -1 ${reads[0]} \\
-            -2 ${reads[1]} \\
-            --threads $task.cpus \\
-            $unaligned \\
-            $args \\
-            2> ${prefix}.bowtie2.log \\
-            | samtools view -@ $task.cpus $args2 -bhS -o ${prefix}.bam -
-
-        if [ -f ${prefix}.unmapped.fastq.1.gz ]; then
-            mv ${prefix}.unmapped.fastq.1.gz ${prefix}.unmapped_1.fastq.gz
-        fi
-        if [ -f ${prefix}.unmapped.fastq.2.gz ]; then
-            mv ${prefix}.unmapped.fastq.2.gz ${prefix}.unmapped_2.fastq.gz
-        fi
-
-        cat <<-END_VERSIONS > versions.yml
-        "${task.process}":
-            bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
-            samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
-            pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
-        END_VERSIONS
-        """
-    }
-}
diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
deleted file mode 100644
index d13903925467e97e353f0a4e6bcf9f6cdb8a3664..0000000000000000000000000000000000000000
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-
-import yaml
-import platform
-from textwrap import dedent
-
-
-def _make_versions_html(versions):
-    html = [
-        dedent(
-            """\\
-            <style>
-            #nf-core-versions tbody:nth-child(even) {
-                background-color: #f2f2f2;
-            }
-            </style>
-            <table class="table" style="width:100%" id="nf-core-versions">
-                <thead>
-                    <tr>
-                        <th> Process Name </th>
-                        <th> Software </th>
-                        <th> Version  </th>
-                    </tr>
-                </thead>
-            """
-        )
-    ]
-    for process, tmp_versions in sorted(versions.items()):
-        html.append("<tbody>")
-        for i, (tool, version) in enumerate(sorted(tmp_versions.items())):
-            html.append(
-                dedent(
-                    f"""\\
-                    <tr>
-                        <td><samp>{process if (i == 0) else ''}</samp></td>
-                        <td><samp>{tool}</samp></td>
-                        <td><samp>{version}</samp></td>
-                    </tr>
-                    """
-                )
-            )
-        html.append("</tbody>")
-    html.append("</table>")
-    return "\\n".join(html)
-
-
-versions_this_module = {}
-versions_this_module["${task.process}"] = {
-    "python": platform.python_version(),
-    "yaml": yaml.__version__,
-}
-
-with open("$versions") as f:
-    versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
-
-# aggregate versions by the module name (derived from fully-qualified process name)
-versions_by_module = {}
-for process, process_versions in versions_by_process.items():
-    module = process.split(":")[-1]
-    try:
-        assert versions_by_module[module] == process_versions, (
-            "We assume that software versions are the same between all modules. "
-            "If you see this error-message it means you discovered an edge-case "
-            "and should open an issue in nf-core/tools. "
-        )
-    except KeyError:
-        versions_by_module[module] = process_versions
-
-versions_by_module["Workflow"] = {
-    "Nextflow": "$workflow.nextflow.version",
-    "$workflow.manifest.name": "$workflow.manifest.version",
-}
-
-versions_mqc = {
-    "id": "software_versions",
-    "section_name": "${workflow.manifest.name} Software Versions",
-    "section_href": "https://github.com/${workflow.manifest.name}",
-    "plot_type": "html",
-    "description": "are collected at run time from the software output.",
-    "data": _make_versions_html(versions_by_module),
-}
-
-with open("software_versions.yml", "w") as f:
-    yaml.dump(versions_by_module, f, default_flow_style=False)
-with open("software_versions_mqc.yml", "w") as f:
-    yaml.dump(versions_mqc, f, default_flow_style=False)
-
-with open("versions.yml", "w") as f:
-    yaml.dump(versions_this_module, f, default_flow_style=False)
diff --git a/modules/nf-core/modules/custom/getchromsizes/main.nf b/modules/nf-core/modules/custom/getchromsizes/main.nf
deleted file mode 100644
index 0eabf3a4c3cdd9a862154859e1241990052dc2d4..0000000000000000000000000000000000000000
--- a/modules/nf-core/modules/custom/getchromsizes/main.nf
+++ /dev/null
@@ -1,32 +0,0 @@
-process CUSTOM_GETCHROMSIZES {
-    tag "$fasta"
-    label 'process_low'
-
-    conda (params.enable_conda ? "bioconda::samtools=1.15.1" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
-        'quay.io/biocontainers/samtools:1.15.1--h1170115_0' }"
-
-    input:
-    path fasta
-
-    output:
-    path '*.sizes'      , emit: sizes
-    path '*.fai'        , emit: fai
-    path  "versions.yml", emit: versions
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script:
-    def args = task.ext.args ?: ''
-    """
-    samtools faidx $fasta
-    cut -f 1,2 ${fasta}.fai > ${fasta}.sizes
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        custom: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
-    END_VERSIONS
-    """
-}
diff --git a/modules/nf-core/modules/fastqc/main.nf b/modules/nf-core/modules/fastqc/main.nf
deleted file mode 100644
index 05730368b2d43e0eaac6b13a69f07ed54d1ed2cb..0000000000000000000000000000000000000000
--- a/modules/nf-core/modules/fastqc/main.nf
+++ /dev/null
@@ -1,59 +0,0 @@
-process FASTQC {
-    tag "$meta.id"
-    label 'process_medium'
-
-    conda (params.enable_conda ? "bioconda::fastqc=0.11.9" : null)
-    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
-        'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
-        'quay.io/biocontainers/fastqc:0.11.9--0' }"
-
-    input:
-    tuple val(meta), path(reads)
-
-    output:
-    tuple val(meta), path("*.html"), emit: html
-    tuple val(meta), path("*.zip") , emit: zip
-    path  "versions.yml"           , emit: versions
-
-    when:
-    task.ext.when == null || task.ext.when
-
-    script:
-    def args = task.ext.args ?: ''
-    // Add soft-links to original FastQs for consistent naming in pipeline
-    def prefix = task.ext.prefix ?: "${meta.id}"
-    if (meta.single_end) {
-        """
-        [ ! -f  ${prefix}.fastq.gz ] && ln -s $reads ${prefix}.fastq.gz
-        fastqc $args --threads $task.cpus ${prefix}.fastq.gz
-
-        cat <<-END_VERSIONS > versions.yml
-        "${task.process}":
-            fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
-        END_VERSIONS
-        """
-    } else {
-        """
-        [ ! -f  ${prefix}_1.fastq.gz ] && ln -s ${reads[0]} ${prefix}_1.fastq.gz
-        [ ! -f  ${prefix}_2.fastq.gz ] && ln -s ${reads[1]} ${prefix}_2.fastq.gz
-        fastqc $args --threads $task.cpus ${prefix}_1.fastq.gz ${prefix}_2.fastq.gz
-
-        cat <<-END_VERSIONS > versions.yml
-        "${task.process}":
-            fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
-        END_VERSIONS
-        """
-    }
-
-    stub:
-    def prefix = task.ext.prefix ?: "${meta.id}"
-    """
-    touch ${prefix}.html
-    touch ${prefix}.zip
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        fastqc: \$( fastqc --version | sed -e "s/FastQC v//g" )
-    END_VERSIONS
-    """
-}
diff --git a/nextflow.config b/nextflow.config
index b003a2270e2297c98f77d5e204e06e3f06f0df69..83e09fb64cadef9db5592792d9f133b4c5d7520d 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -12,6 +12,7 @@ params {
     // Input options
     input = null
 
+
     // References
     genome = null
     igenomes_base = 's3://ngi-igenomes/igenomes'
@@ -49,8 +50,8 @@ params {
          ligation_site='GATCGATC'
       }
       'arima' {
-         restriction_site='^GATC,G^ANT'
-         ligation_site='GATCGATC,GATCGANT,GANTGATC,GANTGANT'
+         restriction_site='^GATC,G^ANTC'
+         ligation_site='GATCGATC,GATCANTC,GANTGATC,GANTANTC'
       }
     }
     
@@ -92,7 +93,9 @@ params {
     // MultiQC options
     multiqc_config             = null
     multiqc_title              = null
+    multiqc_logo               = null
     max_multiqc_email_size     = '25.MB'
+    multiqc_methods_description = null
 
     // Boilerplate options
     outdir                     = './results'
@@ -102,12 +105,13 @@ params {
     email_on_fail              = null
     plaintext_email            = false
     monochrome_logs            = false
+    hook_url                   = null
     help                       = false
+    version                    = false
     validate_params            = true
     show_hidden_params         = false
-    schema_ignore_params       = 'genomes'
-    enable_conda               = false
-
+    schema_ignore_params       = 'genomes,digest'
+
     // Config options
     custom_config_version      = 'master'
     custom_config_base         = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
@@ -116,6 +120,7 @@ params {
     config_profile_url         = null
     config_profile_name        = null
 
+
     // Max resource options
     // Defaults only, expecting to be overwritten
     max_memory                 = '128.GB'
@@ -149,7 +154,16 @@ try {
 profiles {
     debug { process.beforeScript = 'echo $HOSTNAME' }
     conda {
-        params.enable_conda    = true
+        conda.enabled          = true
+        docker.enabled         = false
+        singularity.enabled    = false
+        podman.enabled         = false
+        shifter.enabled        = false
+        charliecloud.enabled   = false
+    }
+    mamba {
+        conda.enabled          = true
+        conda.useMamba         = true
         docker.enabled         = false
         singularity.enabled    = false
         podman.enabled         = false
@@ -164,6 +178,9 @@ profiles {
         shifter.enabled        = false
         charliecloud.enabled   = false
     }
+    arm {
+        docker.runOptions = '-u $(id -u):$(id -g) --platform=linux/amd64'
+    }
     singularity {
         singularity.enabled    = true
         singularity.autoMounts = true
@@ -193,10 +210,16 @@ profiles {
         podman.enabled         = false
         shifter.enabled        = false
     }
+    gitpod {
+        executor.name          = 'local'
+        executor.cpus          = 16
+        executor.memory        = 60.GB
+    }
     test      { includeConfig 'conf/test.config'      }
     test_full { includeConfig 'conf/test_full.config' }
 }
 
+
 // Load igenomes.config if required
 if (!params.igenomes_ignore) {
     includeConfig 'conf/igenomes.config'
@@ -204,6 +227,7 @@ if (!params.igenomes_ignore) {
     params.genomes = [:]
 }
 
+
 // Export these variables to prevent local Python/R libraries from conflicting with those in the container
 // The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container.
 // See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable.
@@ -238,12 +262,13 @@ dag {
 
 manifest {
     name            = 'nf-core/hic'
-    author          = 'Nicolas Servant'
+    author          = """Nicolas Servant"""
     homePage        = 'https://github.com/nf-core/hic'
-    description     = 'Analysis of Chromosome Conformation Capture data (Hi-C)'
+    description     = """Analysis of Chromosome Conformation Capture data (Hi-C)"""
     mainScript      = 'main.nf'
-    nextflowVersion = '!>=21.10.3'
-    version         = '1.4.0dev'
+    nextflowVersion = '!>=22.10.1'
+    version         = '1.4.0'
+    doi             = ''
 }
 
 // Load modules.config for DSL2 module specific options
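The new multiqc_logo, multiqc_methods_description, hook_url and version options above all default to null/false and are optional. A hypothetical user-side config, supplied with -c, that exercises them (paths and URL are placeholders only):

    // e.g. nextflow run nf-core/hic -profile mamba -c custom.config
    params {
        multiqc_logo                = 'assets/my_institute_logo.png'
        multiqc_methods_description = 'assets/methods_description.yml'
        hook_url                    = 'https://hooks.slack.com/services/XXXX/XXXX'
    }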
diff --git a/nextflow_schema.json b/nextflow_schema.json
index ecc9df497f2ca3662468e6649ae602becd4a7542..96e9e09ebf0880d1494566c733fdc2e07129c21b 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -433,6 +433,12 @@
                     "fa_icon": "fas fa-question-circle",
                     "hidden": true
                 },
+                "version": {
+                    "type": "boolean",
+                    "description": "Display version and exit.",
+                    "fa_icon": "fas fa-question-circle",
+                    "hidden": true
+                },
                 "publish_dir_mode": {
                     "type": "string",
                     "default": "copy",
@@ -470,12 +476,30 @@
                     "fa_icon": "fas fa-palette",
                     "hidden": true
                 },
+                "hook_url": {
+                    "type": "string",
+                    "description": "Incoming hook URL for messaging service",
+                    "fa_icon": "fas fa-people-group",
+                    "help_text": "Incoming hook URL for messaging service. Currently, MS Teams and Slack are supported.",
+                    "hidden": true
+                },
                 "multiqc_config": {
                     "type": "string",
                     "description": "Custom config file to supply to MultiQC.",
                     "fa_icon": "fas fa-cog",
                     "hidden": true
                 },
+                "multiqc_logo": {
+                    "type": "string",
+                    "description": "Custom logo file to supply to MultiQC. File name must also be set in the MultiQC config file",
+                    "fa_icon": "fas fa-image",
+                    "hidden": true
+                },
+                "multiqc_methods_description": {
+                    "type": "string",
+                    "description": "Custom MultiQC yaml file containing HTML including a methods description.",
+                    "fa_icon": "fas fa-cog"
+                },
                 "tracedir": {
                     "type": "string",
                     "description": "Directory to keep pipeline Nextflow logs and reports.",
@@ -496,12 +520,6 @@
                     "description": "Show all params when using `--help`",
                     "hidden": true,
                     "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
-                },
-                "enable_conda": {
-                    "type": "boolean",
-                    "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.",
-                    "hidden": true,
-                    "fa_icon": "fas fa-bacon"
                 }
             }
         }
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..0d62beb6f970a40843767771cc66ee0df14b21ce
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,10 @@
+# Config file for Python. Mostly used to configure linting of bin/check_samplesheet.py with Black.
+# Should be kept the same as nf-core/tools to avoid fighting with template synchronisation.
+[tool.black]
+line-length = 120
+target_version = ["py37", "py38", "py39", "py310"]
+
+[tool.isort]
+profile = "black"
+known_first_party = ["nf_core"]
+multi_line_output = 3
diff --git a/subworkflows/local/compartments.nf b/subworkflows/local/compartments.nf
index 23d78ac0f3083a00224b130655d94070b1478228..fee68a5d5d8c3a061d02a6f698aff80186b8d7c3 100644
--- a/subworkflows/local/compartments.nf
+++ b/subworkflows/local/compartments.nf
@@ -1,4 +1,4 @@
-include { CALL_COMPARTMENTS } from '../../modules/local/cooltools/eigs-cis'
+include { COOLTOOLS_EIGSCIS } from '../../modules/local/cooltools/eigscis'
 
 workflow COMPARTMENTS {
 
@@ -10,14 +10,14 @@ workflow COMPARTMENTS {
   main:
   ch_versions = Channel.empty()
 
-  CALL_COMPARTMENTS (
+  COOLTOOLS_EIGSCIS(
     cool,
-    fasta.collect(),
-    chrsize.collect()
+    fasta.map{it -> it[1]}.collect(),
+    chrsize.map{it -> it[1]}.collect()
   )
-  ch_versions = ch_versions.mix(CALL_COMPARTMENTS.out.versions)
+  ch_versions = ch_versions.mix(COOLTOOLS_EIGSCIS.out.versions)
 
   emit:
   versions = ch_versions
-  compartments = CALL_COMPARTMENTS.out.results
+  compartments = COOLTOOLS_EIGSCIS.out.results
 }
\ No newline at end of file
diff --git a/subworkflows/local/cooler.nf b/subworkflows/local/cooler.nf
index 7ce15de2065780d12154f1b9fa31044f33948c91..2525931bbd57b57252a49f6e80912e37fe9e83ec 100644
--- a/subworkflows/local/cooler.nf
+++ b/subworkflows/local/cooler.nf
@@ -4,12 +4,11 @@
  * OUTPUT : cooler files
  */
 
-include { COOLER_ZOOMIFY } from '../../modules/nf-core/modules/cooler/zoomify/main'
-
-include { COOLER_DUMP } from '../../modules/local/cooler/dump' 
-include { COOLER_CLOAD } from '../../modules/local/cooler/cload' 
-include { COOLER_BALANCE } from '../../modules/local/cooler/balance'
-include { COOLER_MAKEBINS } from '../../modules/local/cooler/makebins'
+include { COOLER_ZOOMIFY } from '../../modules/nf-core/cooler/zoomify/main'
+include { COOLER_DUMP } from '../../modules/nf-core/cooler/dump/main'
+include { COOLER_CLOAD } from '../../modules/nf-core/cooler/cload/main'
+include { COOLER_BALANCE } from '../../modules/nf-core/cooler/balance/main'
+include { COOLER_MAKEBINS } from '../../modules/nf-core/cooler/makebins/main'
 
 include { SPLIT_COOLER_DUMP } from '../../modules/local/split_cooler_dump'
 
@@ -25,7 +24,7 @@ workflow COOLER {
 
   take:
   pairs // [meta, pairs, index]
-  chromsize
+  chromsize // [meta, chromsize]
   cool_bins
 
   main:
@@ -45,7 +44,7 @@ workflow COOLER {
 
   COOLER_CLOAD(
     pairs.combine(cool_bins),
-    chromsize.collect()
+    chromsize.map{it -> it[1]}.collect()
   )
   ch_versions = ch_versions.mix(COOLER_CLOAD.out.versions)
 
@@ -86,10 +85,6 @@ workflow COOLER {
   )
   ch_versions = ch_versions.mix(COOLER_DUMP.out.versions)
 
-  //COOLER_DUMP(
-  //  COOLER_ZOOMIFY.out.mcool.combine(cool_bins).map{it->[it[0], it[1], it[2]]}
-  //)
-
   SPLIT_COOLER_DUMP(
     COOLER_DUMP.out.bedpe
   )
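Because chromsize now carries a meta map, call sites that need a bare file strip it before collecting, as in the COOLER_CLOAD call above. A small self-contained sketch of the same pattern written with explicit tuple destructuring (the example channel is illustrative):

    // [ meta, sizes ] -> sizes ; collect() yields a value channel reusable across tasks
    Channel.of( [ [ id: 'genome' ], file('genome.sizes') ] )
           .map { meta, sizes -> sizes }
           .collect()
           .view()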
diff --git a/subworkflows/local/hicpro.nf b/subworkflows/local/hicpro.nf
index cb6f33cd4d188138beb4bfb3abf1d1f5e5bb65aa..8b106a0820cf808501add6e5ad886cc726626107 100644
--- a/subworkflows/local/hicpro.nf
+++ b/subworkflows/local/hicpro.nf
@@ -86,8 +86,6 @@ workflow HICPRO {
   )
   ch_versions = ch_versions.mix(MERGE_VALID_INTERACTION.out.versions)
 
-
-  ch_hicpro_stats.view()
   MERGE_STATS(
     ch_hicpro_stats
   )
diff --git a/subworkflows/local/hicpro_mapping.nf b/subworkflows/local/hicpro_mapping.nf
index 3529f4ef247b5d9ad76d48a021e0023a8a948061..0f889e95f425becd78f6ab097ea035b6bd6e7abf 100644
--- a/subworkflows/local/hicpro_mapping.nf
+++ b/subworkflows/local/hicpro_mapping.nf
@@ -3,9 +3,9 @@
  * From the raw sequencing reads to a paired-end bam file
  */
 
-include { BOWTIE2_ALIGN } from '../../modules/nf-core/modules/bowtie2/align/main'
+include { BOWTIE2_ALIGN } from '../../modules/nf-core/bowtie2/align/main'
 include { TRIM_READS } from '../../modules/local/hicpro/trim_reads'
-include { BOWTIE2_ALIGN as BOWTIE2_ALIGN_TRIMMED } from '../../modules/nf-core/modules/bowtie2/align/main'
+include { BOWTIE2_ALIGN as BOWTIE2_ALIGN_TRIMMED } from '../../modules/nf-core/bowtie2/align/main'
 include { MERGE_BOWTIE2 } from '../../modules/local/hicpro/bowtie2_merge'
 include { COMBINE_MATES} from '../../modules/local/hicpro/combine_mates'
 include { MAPPING_STATS_DNASE } from '../../modules/local/hicpro/dnase_mapping_stats'
@@ -35,7 +35,7 @@ workflow HICPRO_MAPPING {
 
   take:
   reads // [meta, read1, read2]
-  index // path
+  index // [meta, path]
   ligation_site // value
 
   main:
@@ -46,11 +46,12 @@ workflow HICPRO_MAPPING {
   ch_reads_r2 = reads.map{ it -> pairToSingle(it,"R2") }
   ch_reads = ch_reads_r1.concat(ch_reads_r2)
 
-  // bowtie2
+  // bowtie2 - save_unaligned=true - sort_bam=false
   BOWTIE2_ALIGN(
     ch_reads,
     index.collect(),
-    Channel.value(true).collect()
+    true,
+    false
   )
   ch_versions = ch_versions.mix(BOWTIE2_ALIGN.out.versions)
 
@@ -62,11 +63,12 @@ workflow HICPRO_MAPPING {
     )
     ch_versions = ch_versions.mix(TRIM_READS.out.versions)
 
-    // bowtie2 on trimmed reads
+    // bowtie2 on trimmed reads - save_unaligned=false - sort_bam=false
     BOWTIE2_ALIGN_TRIMMED(
       TRIM_READS.out.fastq,
       index.collect(),
-      Channel.value(false).collect()
+      false,
+      false
     )
     ch_versions = ch_versions.mix(BOWTIE2_ALIGN_TRIMMED.out.versions)
 
@@ -100,8 +102,6 @@ workflow HICPRO_MAPPING {
       .set {ch_bams}
   }
 
-  ch_bams.view()
-
   COMBINE_MATES (
     ch_bams
   )
diff --git a/subworkflows/local/input_check.nf b/subworkflows/local/input_check.nf
index 9735a49889301692fde334da4403577bfa4313c6..3f21f1f2c5a6a82fc781647724c6db3b574813e0 100644
--- a/subworkflows/local/input_check.nf
+++ b/subworkflows/local/input_check.nf
@@ -9,7 +9,6 @@ workflow INPUT_CHECK {
     samplesheet // file: /path/to/samplesheet.csv
 
     main:
-
     if (params.split_fastq){
 
       SAMPLESHEET_CHECK ( samplesheet )
diff --git a/subworkflows/local/prepare_genome.nf b/subworkflows/local/prepare_genome.nf
index dd9dd291d864e66122e247f13e516f212ef445bb..a4a2399303570928d763612bd03d88bd4b485f3b 100644
--- a/subworkflows/local/prepare_genome.nf
+++ b/subworkflows/local/prepare_genome.nf
@@ -2,8 +2,8 @@
  * Prepare Annotation Genome for Hi-C data analysis
  */
 
-include { BOWTIE2_BUILD } from '../../modules/nf-core/modules/bowtie2/build/main'
-include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/modules/custom/getchromsizes/main'
+include { BOWTIE2_BUILD } from '../../modules/nf-core/bowtie2/build/main'
+include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/custom/getchromsizes/main'
 include { GET_RESTRICTION_FRAGMENTS } from '../../modules/local/hicpro/get_restriction_fragments'
 
 workflow PREPARE_GENOME {
@@ -25,6 +25,7 @@ workflow PREPARE_GENOME {
     ch_versions = ch_versions.mix(BOWTIE2_BUILD.out.versions)
   }else{
     Channel.fromPath( params.bwt2_index , checkIfExists: true)
+           .map { it -> [[:], it]}
            .ifEmpty { exit 1, "Genome index: Provided index not found: ${params.bwt2_index}" }
            .set { ch_index }
   }
@@ -39,6 +40,7 @@ workflow PREPARE_GENOME {
     ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions)
   }else{
     Channel.fromPath( params.chromosome_size , checkIfExists: true)
+           .map { it -> [[:], it]}
            .set {ch_chromsize} 
   }
 
@@ -53,6 +55,7 @@ workflow PREPARE_GENOME {
     ch_versions = ch_versions.mix(GET_RESTRICTION_FRAGMENTS.out.versions)
   }else if (!params.dnase){
      Channel.fromPath( params.restriction_fragments, checkIfExists: true )
+            .map{ it -> [[:], it] }
             .set {ch_resfrag}
   }else{
     ch_resfrag = Channel.empty()
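The converse pattern is applied in this subworkflow: user-supplied paths are wrapped with an empty meta map so they have the same [ meta, file ] shape as the outputs of BOWTIE2_BUILD and CUSTOM_GETCHROMSIZES. A minimal sketch, reusing the bwt2_index parameter from the hunk above:

    // Give a bare path the tuple shape the downstream subworkflows now expect.
    Channel.fromPath( params.bwt2_index, checkIfExists: true )
           .map { it -> [ [:], it ] }   // empty meta map + file
           .set { ch_index }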
diff --git a/subworkflows/local/tads.nf b/subworkflows/local/tads.nf
index 5e7f4fc305e55e6707a2a320026b91292613da71..31c1e38b03b8f360c25014e92767f3d9705c434f 100644
--- a/subworkflows/local/tads.nf
+++ b/subworkflows/local/tads.nf
@@ -1,4 +1,4 @@
-include { INSULATION } from '../../modules/local/cooltools/insulation'
+include { COOLTOOLS_INSULATION } from '../../modules/local/cooltools/insulation'
 include { HIC_FIND_TADS } from '../../modules/local/hicexplorer/hicFindTADs'
 
 workflow TADS {
@@ -11,9 +11,9 @@ workflow TADS {
   ch_tads = Channel.empty()
 
   if (params.tads_caller =~ 'insulation'){
-    INSULATION(cool)
-    ch_versions = ch_versions.mix(INSULATION.out.versions)
-    ch_tads = ch_tads.mix(INSULATION.out.results)
+    COOLTOOLS_INSULATION(cool)
+    ch_versions = ch_versions.mix(COOLTOOLS_INSULATION.out.versions)
+    ch_tads = ch_tads.mix(COOLTOOLS_INSULATION.out.tsv)
   }
   
   if (params.tads_caller =~ 'hicexplorer'){
diff --git a/workflows/hic.nf b/workflows/hic.nf
index a58b89c57a3b19399c95a22a93a82eb330cecf72..362d51bd8a547c73d1ba7274e0ba5dc1c32c03ef 100644
--- a/workflows/hic.nf
+++ b/workflows/hic.nf
@@ -67,7 +67,7 @@ if (params.res_dist_decay && !params.skip_dist_decay){
     .set {ch_ddecay_res}
    ch_map_res = ch_map_res.concat(ch_ddecay_res)
 }else{
-  ch_ddecay_res = Channel.create()
+  ch_ddecay_res = Channel.empty()
   if (!params.skip_dist_decay){
     log.warn "[nf-core/hic] Hi-C resolution for distance decay not specified. See --res_dist_decay" 
   }
@@ -81,7 +81,7 @@ if (params.res_compartments && !params.skip_compartments){
     .set {ch_comp_res}
    ch_map_res = ch_map_res.concat(ch_comp_res)
 }else{
-  ch_comp_res = Channel.create()
+  ch_comp_res = Channel.empty()
   if (!params.skip_compartments){
     log.warn "[nf-core/hic] Hi-C resolution for compartment calling not specified. See --res_compartments" 
   }
@@ -94,8 +94,10 @@ ch_map_res = ch_map_res.unique()
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
-ch_multiqc_config        = file("$projectDir/assets/multiqc_config.yml", checkIfExists: true)
-ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config) : Channel.empty()
+ch_multiqc_config          = Channel.fromPath("$projectDir/assets/multiqc_config.yml", checkIfExists: true)
+ch_multiqc_custom_config   = params.multiqc_config ? Channel.fromPath( params.multiqc_config, checkIfExists: true ) : Channel.empty()
+ch_multiqc_logo            = params.multiqc_logo   ? Channel.fromPath( params.multiqc_logo, checkIfExists: true ) : Channel.empty()
+ch_multiqc_custom_methods_description = params.multiqc_methods_description ? file(params.multiqc_methods_description, checkIfExists: true) : file("$projectDir/assets/methods_description_template.yml", checkIfExists: true)
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -128,10 +130,8 @@ include { TADS } from '../subworkflows/local/tads'
 //
 // MODULE: Installed directly from nf-core/modules
 //
-
-include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/modules/custom/dumpsoftwareversions/main'
-include { FASTQC  } from '../modules/nf-core/modules/fastqc/main'
-//include { MULTIQC } from '../modules/nf-core/modules/multiqc/main'
+include { FASTQC                      } from '../modules/nf-core/fastqc/main'
+include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -141,6 +141,7 @@ include { FASTQC  } from '../modules/nf-core/modules/fastqc/main'
 
 Channel.fromPath( params.fasta )
        .ifEmpty { exit 1, "Genome index: Fasta file not found: ${params.fasta}" }
+       .map{it->[[:],it]}
        .set { ch_fasta }
 
 /*
@@ -183,6 +184,6 @@ workflow HIC {
   //
   // SUB-WORFLOW: HiC-Pro
   //
   HICPRO (
     INPUT_CHECK.out.reads,
     PREPARE_GENOME.out.index,
@@ -290,10 +292,13 @@ workflow HIC {
 */
 
 workflow.onComplete {
-  if (params.email || params.email_on_fail) {
-      NfcoreTemplate.email(workflow, params, summary_params, projectDir, log, multiqc_report)
-  }
-  NfcoreTemplate.summary(workflow, params, log)
+    if (params.email || params.email_on_fail) {
+        NfcoreTemplate.email(workflow, params, summary_params, projectDir, log, multiqc_report)
+    }
+    NfcoreTemplate.summary(workflow, params, log)
+    if (params.hook_url) {
+        NfcoreTemplate.IM_notification(workflow, params, summary_params, projectDir, log)
+    }
 }
 
 /*