diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..b6b3190776e8d7f8894ed6484494018355814fc6
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,24 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+indent_size = 4
+indent_style = space
+
+[*.{md,yml,yaml,html,css,scss,js}]
+indent_size = 2
+
+# These files are edited and tested upstream in nf-core/modules
+[/modules/nf-core/**]
+charset = unset
+end_of_line = unset
+insert_final_newline = unset
+trim_trailing_whitespace = unset
+indent_style = unset
+indent_size = unset
+
+[/assets/email*]
+indent_size = unset
diff --git a/.gitattributes b/.gitattributes
index 7fe55006f87bb1a423e2cdf70258a55543c2486d..050bb1203530c3ee0b610fe2a05aed5059a3bf19 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,3 @@
 *.config linguist-language=nextflow
+modules/nf-core/** linguist-generated
+subworkflows/nf-core/** linguist-generated
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 284970f00a1e6d4fa739c1faa0805642fdc0668f..21343c4d8ef64758fbfe436553442d6a857c7024 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -15,11 +15,10 @@ Contributions to the code are even more welcome ;)
 
 If you'd like to write some code for nf-core/hic, the standard workflow is as follows:
 
-1. Check that there isn't already an issue about your idea in the [nf-core/hic issues](https://github.com/nf-core/hic/issues) to avoid duplicating work
-    * If there isn't one already, please create one so that others know you're working on this
+1. Check that there isn't already an issue about your idea in the [nf-core/hic issues](https://github.com/nf-core/hic/issues) to avoid duplicating work. If there isn't one already, please create one so that others know you're working on this
 2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/hic repository](https://github.com/nf-core/hic) to your GitHub account
 3. Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions)
-4. Use `nf-core schema build .` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10).
+4. Use `nf-core schema build` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10).
 5. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged
 
 If you're not used to this workflow with git, you can start with some [docs from GitHub](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests) or even their [excellent `git` resources](https://try.github.io/).
@@ -49,9 +48,9 @@ These tests are run both with the latest available version of `Nextflow` and als
 
 :warning: Only in the unlikely and regretful event of a release happening with a bug.
 
-* On your own fork, make a new branch `patch` based on `upstream/master`.
-* Fix the bug, and bump version (X.Y.Z+1).
-* A PR should be made on `master` from patch to directly this particular bug.
+- On your own fork, make a new branch `patch` based on `upstream/master`.
+- Fix the bug, and bump version (X.Y.Z+1).
+- A PR should be made on `master` from `patch` to directly fix this particular bug.
 
 ## Getting help
 
@@ -68,22 +67,19 @@ If you wish to contribute a new step, please use the following coding standards:
 1. Define the corresponding input channel into your new process from the expected previous process channel
 2. Write the process block (see below).
 3. Define the output channel if needed (see below).
-4. Add any new flags/options to `nextflow.config` with a default (see below).
-5. Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build .`).
-6. Add any new flags/options to the help message (for integer/text parameters, print to help the corresponding `nextflow.config` parameter).
-7. Add sanity checks for all relevant parameters.
-8. Add any new software to the `scrape_software_versions.py` script in `bin/` and the version command to the `scrape_software_versions` process in `main.nf`.
-9. Do local tests that the new code works properly and as expected.
-10. Add a new test command in `.github/workflow/ci.yaml`.
-11. If applicable add a [MultiQC](https://https://multiqc.info/) module.
-12. Update MultiQC config `assets/multiqc_config.yaml` so relevant suffixes, name clean up, General Statistics Table column order, and module figures are in the right order.
-13. Optional: Add any descriptions of MultiQC report sections and output files to `docs/output.md`.
+4. Add any new parameters to `nextflow.config` with a default (see below).
+5. Add any new parameters to `nextflow_schema.json` with help text (via the `nf-core schema build` tool).
+6. Add sanity checks and validation for all relevant parameters.
+7. Perform local tests to validate that the new code works as expected.
+8. If applicable, add a new test command in `.github/workflows/ci.yml`.
+9. Update MultiQC config `assets/multiqc_config.yml` so relevant suffixes, file name clean up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://multiqc.info/) module.
+10. Add a description of the output files and if relevant any appropriate images from the MultiQC report to `docs/output.md`.
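+
+As a rough illustration only, a minimal DSL2 process block might look like the sketch below. The process name, labels, `example_tool` command and file names are all hypothetical:
+
+```nextflow
+process EXAMPLE_TOOL {
+    tag "$meta.id"
+    label 'process_low'
+
+    input:
+    tuple val(meta), path(reads)
+
+    output:
+    tuple val(meta), path("*.tsv"), emit: results
+
+    script:
+    """
+    example_tool --input $reads --output ${meta.id}.tsv
+    """
+}
+```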
 
 ### Default values
 
 Parameters should be initialised / defined with default values in `nextflow.config` under the `params` scope.
 
-Once there, use `nf-core schema build .` to add to `nextflow_schema.json`.
+Once there, use `nf-core schema build` to add to `nextflow_schema.json`.
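+
+For example, a hypothetical `--min_quality` parameter would first be given a default value:
+
+```nextflow
+// nextflow.config (illustrative parameter name)
+params {
+    min_quality = 10
+}
+```
+
+Running `nf-core schema build` afterwards will detect the new parameter and prompt you to add it to `nextflow_schema.json`.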
 
 ### Default processes resource requirements
 
@@ -95,34 +91,13 @@ The process resources can be passed on to the tool dynamically within the proces
 
 Please use the following naming schemes, to make it easy to understand what is going where.
 
-* initial process channel: `ch_output_from_<process>`
-* intermediate and terminal channels: `ch_<previousprocess>_for_<nextprocess>`
+- initial process channel: `ch_output_from_<process>`
+- intermediate and terminal channels: `ch_<previousprocess>_for_<nextprocess>`
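+
+For instance (hypothetical process names), a channel produced by a `FASTQC` process and consumed by a `MULTIQC` process could be named:
+
+```nextflow
+ch_output_from_fastqc = FASTQC.out.zip
+ch_fastqc_for_multiqc = ch_output_from_fastqc
+```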
 
 ### Nextflow version bumping
 
 If you are using a new feature from core Nextflow, you may bump the minimum required version of nextflow in the pipeline with: `nf-core bump-version --nextflow . [min-nf-version]`
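+
+For example, to raise the minimum version to the one currently tested in CI (an illustrative value, not a recommendation):
+
+```console
+nf-core bump-version --nextflow . 21.10.3
+```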
 
-### Software version reporting
-
-If you add a new tool to the pipeline, please ensure you add the information of the tool to the `get_software_version` process.
-
-Add to the script block of the process, something like the following:
-
-```bash
-<YOUR_TOOL> --version &> v_<YOUR_TOOL>.txt 2>&1 || true
-```
-
-or
-
-```bash
-<YOUR_TOOL> --help | head -n 1 &> v_<YOUR_TOOL>.txt 2>&1 || true
-```
-
-You then need to edit the script `bin/scrape_software_versions.py` to:
-
-1. Add a Python regex for your tool's `--version` output (as in stored in the `v_<YOUR_TOOL>.txt` file), to ensure the version is reported as a `v` and the version number e.g. `v2.1.1`
-2. Add a HTML entry to the `OrderedDict` for formatting in MultiQC.
-
 ### Images and figures
 
 For overview images and other documents we follow the nf-core [style guidelines and examples](https://nf-co.re/developers/design_guidelines).
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 81c1e337d22cadb133d32afc4fd097ca49d55075..0000000000000000000000000000000000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-name: Bug report
-about: Report something that is broken or incorrect
-labels: bug
----
-
-<!--
-# nf-core/hic bug report
-
-Hi there!
-
-Thanks for telling us about a problem with the pipeline.
-Please delete this text and anything that's not relevant from the template below:
--->
-
-## Check Documentation
-
-I have checked the following places for your error:
-
-- [ ] [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting)
-- [ ] [nf-core/hic pipeline documentation](https://nf-co.re/hic/usage)
-
-## Description of the bug
-
-<!-- A clear and concise description of what the bug is. -->
-
-## Steps to reproduce
-
-Steps to reproduce the behaviour:
-
-1. Command line: <!-- [e.g. `nextflow run ...`] -->
-2. See error: <!-- [Please provide your error message] -->
-
-## Expected behaviour
-
-<!-- A clear and concise description of what you expected to happen. -->
-
-## Log files
-
-Have you provided the following extra information/files:
-
-- [ ] The command used to run the pipeline
-- [ ] The `.nextflow.log` file <!-- this is a hidden file in the directory where you launched the pipeline -->
-
-## System
-
-- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->
-- Executor: <!-- [e.g. slurm, local, awsbatch...] -->
-- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->
-- Version <!-- [e.g. 7, 10.13.6, 18.3...] -->
-
-## Nextflow Installation
-
-- Version: <!-- [e.g. 19.10.0] -->
-
-## Container engine
-
-- Engine: <!-- [e.g. Conda, Docker, Singularity, Podman, Shifter or Charliecloud] -->
-- version: <!-- [e.g. 1.0.0] -->
-- Image tag: <!-- [e.g. nfcore/hic:1.0.0] -->
-
-## Additional context
-
-<!-- Add any other context about the problem here. -->
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000000000000000000000000000000000000..98a7950ce9bef303424ad5ce1c10c1c6ed135f1d
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,50 @@
+name: Bug report
+description: Report something that is broken or incorrect
+labels: bug
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Before you post this issue, please check the documentation:
+
+        - [nf-core website: troubleshooting](https://nf-co.re/usage/troubleshooting)
+        - [nf-core/hic pipeline documentation](https://nf-co.re/hic/usage)
+
+  - type: textarea
+    id: description
+    attributes:
+      label: Description of the bug
+      description: A clear and concise description of what the bug is.
+    validations:
+      required: true
+
+  - type: textarea
+    id: command_used
+    attributes:
+      label: Command used and terminal output
+      description: Steps to reproduce the behaviour. Please paste the command you used to launch the pipeline and the output from your terminal.
+      render: console
+      placeholder: |
+        $ nextflow run ...
+
+        Some output where something broke
+
+  - type: textarea
+    id: files
+    attributes:
+      label: Relevant files
+      description: |
+        Please drag and drop the relevant files here. Create a `.zip` archive if the extension is not allowed.
+        Your verbose log file `.nextflow.log` is often useful _(this is a hidden file in the directory where you launched the pipeline)_ as well as custom Nextflow configuration files.
+
+  - type: textarea
+    id: system
+    attributes:
+      label: System information
+      description: |
+        * Nextflow version _(eg. 21.10.3)_
+        * Hardware _(eg. HPC, Desktop, Cloud)_
+        * Executor _(eg. slurm, local, awsbatch)_
+        * Container engine: _(e.g. Docker, Singularity, Conda, Podman, Shifter or Charliecloud)_
+        * OS _(eg. CentOS Linux, macOS, Linux Mint)_
+        * Version of nf-core/hic _(eg. 1.1, 1.5, 1.8.2)_
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 887f04598ba6bb758d35d2ca79012ccc8129ff34..379c60ae3294f480310d7e5ebeb2f6641dac2374 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,4 +1,3 @@
-blank_issues_enabled: false
 contact_links:
   - name: Join nf-core
     url: https://nf-co.re/join
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 2cec9b3b778f87d420a0d124094557fe5b8efadf..0000000000000000000000000000000000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for the nf-core/hic pipeline
-labels: enhancement
----
-
-<!--
-# nf-core/hic feature request
-
-Hi there!
-
-Thanks for suggesting a new feature for the pipeline!
-Please delete this text and anything that's not relevant from the template below:
--->
-
-## Is your feature request related to a problem? Please describe
-
-<!-- A clear and concise description of what the problem is. -->
-
-<!-- e.g. [I'm always frustrated when ...] -->
-
-## Describe the solution you'd like
-
-<!-- A clear and concise description of what you want to happen. -->
-
-## Describe alternatives you've considered
-
-<!-- A clear and concise description of any alternative solutions or features you've considered. -->
-
-## Additional context
-
-<!-- Add any other context about the feature request here. -->
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d411b185d23b589888a16fc63b564928ba95afca
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,11 @@
+name: Feature request
+description: Suggest an idea for the nf-core/hic pipeline
+labels: enhancement
+body:
+  - type: textarea
+    id: description
+    attributes:
+      label: Description of feature
+      description: Please describe your suggestion for a new feature. It might help to describe a problem or use case, plus any alternatives that you have considered.
+    validations:
+      required: true
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index ab821a6a0f04cb49069cbd36f97f843a12405cd1..56233aa3631298e9a762b06c6f868ee171b41773 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -10,17 +10,15 @@ Remember that PRs should be made against the dev branch, unless you're preparing
 
 Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
 -->
-<!-- markdownlint-disable ul-indent -->
 
 ## PR checklist
 
 - [ ] This comment contains a description of changes (with reason).
 - [ ] If you've fixed a bug or added code that should be tested, add tests!
-    - [ ] If you've added a new tool - add to the software_versions process and a regex to `scrape_software_versions.py`
-    - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
-    - [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
-- [ ] Make sure your code lints (`nf-core lint .`).
-- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`).
+  - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
+  - [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
+- [ ] Make sure your code lints (`nf-core lint`).
+- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`).
 - [ ] Usage Documentation in `docs/usage.md` is updated.
 - [ ] Output Documentation in `docs/output.md` is updated.
 - [ ] `CHANGELOG.md` is updated.
diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml
deleted file mode 100644
index 8d7eb53b07463c24bd981a479a7d0591fabf7463..0000000000000000000000000000000000000000
--- a/.github/markdownlint.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# Markdownlint configuration file
-default: true
-line-length: false
-no-duplicate-header:
-    siblings_only: true
-no-inline-html:
-    allowed_elements:
-        - img
-        - p
-        - kbd
-        - details
-        - summary
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index cefb14a0ab730b7b0247a8efeb72a84da964bd92..4d71d9a3f171658e4189107f2abc239f9f98bfa2 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -1,45 +1,33 @@
 name: nf-core AWS full size tests
 # This workflow is triggered on published releases.
-# It can be additionally triggered manually with GitHub actions workflow dispatch.
+# It can additionally be triggered manually with the GitHub Actions workflow dispatch button.
 # It runs the -profile 'test_full' on AWS batch
 
 on:
-  workflow_run:
-    workflows: ["nf-core Docker push (release)"]
-    types: [completed]
+  release:
+    types: [published]
   workflow_dispatch:
-
-
-env:
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-  TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
-  AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
-  AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
-  AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
-
-
 jobs:
-  run-awstest:
+  run-tower:
     name: Run AWS full tests
     if: github.repository == 'nf-core/hic'
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Miniconda
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          auto-update-conda: true
-          python-version: 3.7
-      - name: Install awscli
-        run: conda install -c conda-forge awscli
-      - name: Start AWS batch job
+      - name: Launch workflow via tower
+        uses: nf-core/tower-action@v3
+        # TODO nf-core: You can customise AWS full pipeline tests as required
         # Add full size test data (but still relatively small datasets for few samples)
         # on the `test_full.config` test runs with only one set of parameters
-        # Then specify `-profile test_full` instead of `-profile test` on the AWS batch command
-        run: |
-          aws batch submit-job \
-            --region eu-west-1 \
-            --job-name nf-core-hic \
-            --job-queue $AWS_JOB_QUEUE \
-            --job-definition $AWS_JOB_DEFINITION \
-            --container-overrides '{"command": ["nf-core/hic", "-r '"${GITHUB_SHA}"' -profile test_full --outdir s3://'"${AWS_S3_BUCKET}"'/hic/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/hic/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}'
+        with:
+          workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
+          access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
+          compute_env: ${{ secrets.TOWER_COMPUTE_ENV }}
+          workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/hic/work-${{ github.sha }}
+          parameters: |
+            {
+              "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-${{ github.sha }}"
+            }
+          profiles: test_full,aws_tower
+          nextflow_config: |
+            process.errorStrategy = 'retry'
+            process.maxRetries = 3
diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml
index c9eafe60d1f9e232718fe7cd392ba09b984a06ce..f892cfd3f1d6deb2f2c57cd60269c51638c85dc6 100644
--- a/.github/workflows/awstest.yml
+++ b/.github/workflows/awstest.yml
@@ -1,41 +1,28 @@
 name: nf-core AWS test
-# This workflow is triggered on push to the master branch.
-# It can be additionally triggered manually with GitHub actions workflow dispatch.
-# It runs the -profile 'test' on AWS batch.
+# This workflow can be triggered manually with the GitHub Actions workflow dispatch button.
+# It runs the pipeline with -profile 'test' on AWS Batch.
 
 on:
   workflow_dispatch:
-
-
-env:
-  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-  TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
-  AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
-  AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
-  AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
-
-
 jobs:
-  run-awstest:
+  run-tower:
     name: Run AWS tests
     if: github.repository == 'nf-core/hic'
     runs-on: ubuntu-latest
     steps:
-      - name: Setup Miniconda
-        uses: conda-incubator/setup-miniconda@v2
+      # Launch workflow using Tower CLI tool action
+      - name: Launch workflow via tower
+        uses: nf-core/tower-action@v3
         with:
-          auto-update-conda: true
-          python-version: 3.7
-      - name: Install awscli
-        run: conda install -c conda-forge awscli
-      - name: Start AWS batch job
-        # For example: adding multiple test runs with different parameters
-        # Remember that you can parallelise this by using strategy.matrix
-        run: |
-          aws batch submit-job \
-          --region eu-west-1 \
-          --job-name nf-core-hic \
-          --job-queue $AWS_JOB_QUEUE \
-          --job-definition $AWS_JOB_DEFINITION \
-          --container-overrides '{"command": ["nf-core/hic", "-r '"${GITHUB_SHA}"' -profile test --outdir s3://'"${AWS_S3_BUCKET}"'/hic/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/hic/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}'
+          workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
+          access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
+          compute_env: ${{ secrets.TOWER_COMPUTE_ENV }}
+          workdir: s3://${{ secrets.AWS_S3_BUCKET }}/work/hic/work-${{ github.sha }}
+          parameters: |
+            {
+              "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-test-${{ github.sha }}"
+            }
+          profiles: test,aws_tower
+          nextflow_config: |
+            process.errorStrategy = 'retry'
+            process.maxRetries = 3
diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml
index 3521022c4d8fd8f3ea7171bef94d9c3c96e2514d..363667229e33a8fe022f697e4755d911234a12fb 100644
--- a/.github/workflows/branch.yml
+++ b/.github/workflows/branch.yml
@@ -13,8 +13,7 @@ jobs:
       - name: Check PRs
         if: github.repository == 'nf-core/hic'
         run: |
-          { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/hic ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]]
-
+          "{ [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/hic ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]]"
 
       # If the above check failed, post a comment on the PR explaining the failure
       # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets
@@ -43,4 +42,4 @@ jobs:
             Thanks again for your contribution!
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           allow-repeats: false
-
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0f0db1a211d8626b14bb0d38fa65e12c819d5030..cc0fc52d25b4945b1bf0e460f3ce6527e5cad992 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,50 +8,43 @@ on:
   release:
     types: [published]
 
-# Uncomment if we need an edge release of Nextflow again
-# env: NXF_EDGE: 1
+env:
+  NXF_ANSI_LOG: false
+  CAPSULE_LOG: none
 
 jobs:
   test:
-    name: Run workflow tests
+    name: Run pipeline with test data
     # Only run on push if this is the nf-core dev branch (merged PRs)
-    if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/hic') }}
+    if: "${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/hic') }}"
     runs-on: ubuntu-latest
-    env:
-      NXF_VER: ${{ matrix.nxf_ver }}
-      NXF_ANSI_LOG: false
     strategy:
       matrix:
-        # Nextflow versions: check pipeline minimum and current latest
-        nxf_ver: ['20.04.0', '']
+        # Nextflow versions
+        include:
+          # Test pipeline minimum Nextflow version
+          - NXF_VER: "21.10.3"
+            NXF_EDGE: ""
+          # Test latest edge release of Nextflow
+          - NXF_VER: ""
+            NXF_EDGE: "1"
     steps:
       - name: Check out pipeline code
         uses: actions/checkout@v2
 
-      - name: Check if Dockerfile or Conda environment changed
-        uses: technote-space/get-diff-action@v4
-        with:
-          FILES: |
-            Dockerfile
-            environment.yml
-
-      - name: Build new docker image
-        if: env.MATCHED_FILES
-        run: docker build --no-cache . -t nfcore/hic:1.3.0
-
-      - name: Pull docker image
-        if: ${{ !env.MATCHED_FILES }}
-        run: |
-          docker pull nfcore/hic:dev
-          docker tag nfcore/hic:dev nfcore/hic:1.3.0
-
       - name: Install Nextflow
         env:
-          CAPSULE_LOG: none
+          NXF_VER: ${{ matrix.NXF_VER }}
+          # Uncomment only if the edge release is more recent than the latest stable release
+          # See https://github.com/nextflow-io/nextflow/issues/2467
+          # NXF_EDGE: ${{ matrix.NXF_EDGE }}
         run: |
           wget -qO- get.nextflow.io | bash
           sudo mv nextflow /usr/local/bin/
 
       - name: Run pipeline with test data
         run: |
-          nextflow run ${GITHUB_WORKSPACE} -profile test,docker
\ No newline at end of file
+          nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index fcde400cedbc1566f84e8a811e0b45a1c113df60..e9cf5de3ac6a3a8a41923568a170315053d7e2e0 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -1,6 +1,7 @@
 name: nf-core linting
 # This workflow is triggered on pushes and PRs to the repository.
-# It runs the `nf-core lint` and markdown lint tests to ensure that the code meets the nf-core guidelines
+# It runs the `nf-core lint`, Prettier and EditorConfig checks to ensure
+# that the code meets the nf-core guidelines.
 on:
   push:
   pull_request:
@@ -8,87 +9,35 @@ on:
     types: [published]
 
 jobs:
-  Markdown:
+  EditorConfig:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '10'
-      - name: Install markdownlint
-        run: npm install -g markdownlint-cli
-      - name: Run Markdownlint
-        run: markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml
-
-      # If the above check failed, post a comment on the PR explaining the failure
-      - name: Post PR comment
-        if: failure()
-        uses: mshick/add-pr-comment@v1
-        with:
-          message: |
-            ## Markdown linting is failing
-
-            To keep the code consistent with lots of contributors, we run automated code consistency checks.
-            To fix this CI test, please run:
 
-            * Install `markdownlint-cli`
-                * On Mac: `brew install markdownlint-cli`
-                * Everything else: [Install `npm`](https://www.npmjs.com/get-npm) then [install `markdownlint-cli`](https://www.npmjs.com/package/markdownlint-cli) (`npm install -g markdownlint-cli`)
-            * Fix the markdown errors
-                * Automatically: `markdownlint . --config .github/markdownlint.yml --fix`
-                * Manually resolve anything left from `markdownlint . --config .github/markdownlint.yml`
+      - uses: actions/setup-node@v2
 
-            Once you push these changes the test should pass, and you can hide this comment :+1:
+      - name: Install editorconfig-checker
+        run: npm install -g editorconfig-checker
 
-            We highly recommend setting up markdownlint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+      - name: Run ECLint check
+        run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile')
 
-            Thanks again for your contribution!
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          allow-repeats: false
-
-
-  YAML:
+  Prettier:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-node@v1
-        with:
-          node-version: '10'
-      - name: Install yaml-lint
-        run: npm install -g yaml-lint
-      - name: Run yaml-lint
-        run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml" -o -name "*.yaml")
-
-      # If the above check failed, post a comment on the PR explaining the failure
-      - name: Post PR comment
-        if: failure()
-        uses: mshick/add-pr-comment@v1
-        with:
-          message: |
-            ## YAML linting is failing
-
-            To keep the code consistent with lots of contributors, we run automated code consistency checks.
-            To fix this CI test, please run:
-
-            * Install `yaml-lint`
-                * [Install `npm`](https://www.npmjs.com/get-npm) then [install `yaml-lint`](https://www.npmjs.com/package/yaml-lint) (`npm install -g yaml-lint`)
-            * Fix the markdown errors
-                * Run the test locally: `yamllint $(find . -type f -name "*.yml" -o -name "*.yaml")`
-                * Fix any reported errors in your YAML files
-
-            Once you push these changes the test should pass, and you can hide this comment :+1:
+      - uses: actions/checkout@v2
 
-            We highly recommend setting up yaml-lint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+      - uses: actions/setup-node@v2
 
-            Thanks again for your contribution!
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          allow-repeats: false
+      - name: Install Prettier
+        run: npm install -g prettier
 
+      - name: Run Prettier --check
+        run: prettier --check ${GITHUB_WORKSPACE}
 
   nf-core:
     runs-on: ubuntu-latest
     steps:
-
       - name: Check out pipeline code
         uses: actions/checkout@v2
 
@@ -101,8 +50,8 @@ jobs:
 
       - uses: actions/setup-python@v1
         with:
-          python-version: '3.6'
-          architecture: 'x64'
+          python-version: "3.6"
+          architecture: "x64"
 
       - name: Install dependencies
         run: |
@@ -114,7 +63,7 @@ jobs:
           GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }}
-        run: nf-core -l lint_log.txt lint ${GITHUB_WORKSPACE} --markdown lint_results.md
+        run: nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md
 
       - name: Save PR number
         if: ${{ always() }}
@@ -130,3 +79,4 @@ jobs:
             lint_results.md
             PR_number.txt
 
diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml
index 90f03c6f91ba0c05d12daff7e15840145a63937f..91c487a1a5020bf2b0ff8d31a3c2b9d589e5fed8 100644
--- a/.github/workflows/linting_comment.yml
+++ b/.github/workflows/linting_comment.yml
@@ -1,4 +1,3 @@
-
 name: nf-core linting comment
 # This workflow is triggered after the linting action is complete
 # It posts an automated comment to the PR, even if the PR is coming from a fork
@@ -15,6 +14,7 @@ jobs:
         uses: dawidd6/action-download-artifact@v2
         with:
           workflow: linting.yml
+          workflow_conclusion: completed
 
       - name: Get PR number
         id: pr_number
@@ -26,4 +26,4 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           number: ${{ steps.pr_number.outputs.pr_number }}
           path: linting-logs/lint_results.md
-
diff --git a/.github/workflows/push_dockerhub_dev.yml b/.github/workflows/push_dockerhub_dev.yml
deleted file mode 100644
index d6fc716fb947d262e0613ccab07655d387a98d1f..0000000000000000000000000000000000000000
--- a/.github/workflows/push_dockerhub_dev.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: nf-core Docker push (dev)
-# This builds the docker image and pushes it to DockerHub
-# Runs on nf-core repo releases and push event to 'dev' branch (PR merges)
-on:
-  push:
-    branches:
-      - dev
-
-jobs:
-  push_dockerhub:
-    name: Push new Docker image to Docker Hub (dev)
-    runs-on: ubuntu-latest
-    # Only run for the nf-core repo, for releases and merged PRs
-    if: ${{ github.repository == 'nf-core/hic' }}
-    env:
-      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
-      DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }}
-    steps:
-      - name: Check out pipeline code
-        uses: actions/checkout@v2
-
-      - name: Build new docker image
-        run: docker build --no-cache . -t nfcore/hic:dev
-
-      - name: Push Docker image to DockerHub (dev)
-        run: |
-          echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
-          docker push nfcore/hic:dev
diff --git a/.github/workflows/push_dockerhub_release.yml b/.github/workflows/push_dockerhub_release.yml
deleted file mode 100644
index eda09ccfb6fcdd5791c56b6875343dc2bbf9c8c0..0000000000000000000000000000000000000000
--- a/.github/workflows/push_dockerhub_release.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-name: nf-core Docker push (release)
-# This builds the docker image and pushes it to DockerHub
-# Runs on nf-core repo releases and push event to 'dev' branch (PR merges)
-on:
-  release:
-    types: [published]
-
-jobs:
-  push_dockerhub:
-    name: Push new Docker image to Docker Hub (release)
-    runs-on: ubuntu-latest
-    # Only run for the nf-core repo, for releases and merged PRs
-    if: ${{ github.repository == 'nf-core/hic' }}
-    env:
-      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
-      DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }}
-    steps:
-      - name: Check out pipeline code
-        uses: actions/checkout@v2
-
-      - name: Build new docker image
-        run: docker build --no-cache . -t nfcore/hic:latest
-
-      - name: Push Docker image to DockerHub (release)
-        run: |
-          echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin
-          docker push nfcore/hic:latest
-          docker tag nfcore/hic:latest nfcore/hic:${{ github.event.release.tag_name }}
-          docker push nfcore/hic:${{ github.event.release.tag_name }}
diff --git a/.gitignore b/.gitignore
index aa4bb5b375a9021f754dbd91d2321d16d1c0afc7..5124c9ac77e036998a69ae8e8e89f9ff6f9bcdb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@ work/
 data/
 results/
 .DS_Store
-tests/
 testing/
 testing*
 *.pyc
diff --git a/.gitpod.yml b/.gitpod.yml
new file mode 100644
index 0000000000000000000000000000000000000000..85d95ecc8eca8c1d7110ba1dd58d649e19bdc008
--- /dev/null
+++ b/.gitpod.yml
@@ -0,0 +1,14 @@
+image: nfcore/gitpod:latest
+
+vscode:
+  extensions: # based on nf-core.nf-core-extensionpack
+    - codezombiech.gitignore # Language support for .gitignore files
+    # - cssho.vscode-svgviewer                 # SVG viewer
+    - esbenp.prettier-vscode # Code formatting with Prettier
+    - eamodio.gitlens # Quickly glimpse into whom, why, and when a line or code block was changed
+    - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files
+    - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar
+    - mechatroner.rainbow-csv # Highlight columns in csv files in different colors
+    # - nextflow.nextflow                      # Nextflow syntax highlighting
+    - oderwat.indent-rainbow # Highlight indentation level
+    - streetsidesoftware.code-spell-checker # Spelling checker for source code
diff --git a/.prettierrc.yml b/.prettierrc.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c81f9a7660b0c0b8df99c5c3f87ce16ef0783917
--- /dev/null
+++ b/.prettierrc.yml
@@ -0,0 +1 @@
+printWidth: 120
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcf522d7d2038c7f1fd424fb82478a325f3db82e..4478bb04fdeb86973c2edc2e935854a1321e5bdc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,14 +3,145 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## v1.3.0 - [date]
+## v1.4.0
 
-Initial release of nf-core/hic, created with the [nf-core](https://nf-co.re/) template.
+### `Added`
+
+* DSL2 version of the nf-core/hic pipeline
+
+## v1.3.1 - 2021-09-25
+
+### `Fixed`
+
+* Fix bug in conda environment for cooltools (#109)
+
+## v1.3.0 - 2021-05-22
+
+* Change the `/tmp/` folder to `./tmp/` folder so that all tmp files are now in the work directory (#24)
+* Add `--hicpro_maps` options to generate the raw and normalized HiC-Pro maps. The default is now to use cooler
+* Add chromosome compartments calling with cooltools (#53)
+* Add HiCExplorer distance decay quality control (#54)
+* Add HiCExplorer TADs calling (#55)
+* Add insulation score TADs calling (#55)
+* Generate cooler/txt contact maps
+* Normalize Hi-C data with cooler instead of iced
+* New `--digestion` parameter to automatically set the restriction_site and ligation_site motifs
+* New `--keep_multi` and `--keep_dups` options. Default: false
+* Template update for nf-core/tools
+* Minor fix to summary log messages in pipeline header
+
+### `Fixed`
+
+* Fix bug in stats reports which were not all correctly exported in the results folder
+* Fix recurrent bug in input file extension (#86)
+* Fix bug in `--bin_size` parameter (#85)
+* `--min_mapq` is ignored if `--keep_multi` is used
+
+### `Deprecated`
+
+* `--rm_dup` and `--rm_multi` are replaced by `--keep_dups` and `--keep_multi`
+
+## v1.2.2 - 2020-09-02
 
 ### `Added`
 
+* Template update for nf-core/tools v1.10.2
+* Add the `--fastq_chunks_size` option to specify the number of reads per chunk if `--split_fastq` is true
+
 ### `Fixed`
 
-### `Dependencies`
+* Fix bug where the `--split_fastq` option was not recognized
+
+## v1.2.1 - 2020-07-06
+
+### `Fixed`
+
+* Fix issue with `--fasta` option and `.fa` extension (#66)
+
+## v1.2.0 - 2020-06-18
+
+### `Added`
+
+* Bump v1.2.0
+* Merge template nf-core 1.9
+* Move some options to camel_case
+* Update python scripts for python3
+* Update conda environment file
+  * python base `2.7.15` > `3.7.6`
+  * pip `19.1` > `20.0.1`
+  * scipy `1.2.1` > `1.4.1`
+  * numpy `1.16.3` > `1.18.1`
+  * bx-python `0.8.2` > `0.8.8`
+  * pysam `0.15.2` > `0.15.4`
+  * cooler `0.8.5` > `0.8.6`
+  * multiqc `1.7` > `1.8`
+  * iced `0.5.1` > `0.5.6`
+  * *_New_* pymdown-extensions `7.1`
+  * *_New_* hicexplorer `3.4.3`
+  * *_New_* bioconductor-hitc `1.32.0`
+  * *_New_* r-optparse `1.6.6`
+  * *_New_* ucsc-bedgraphtobigwig `377`
+  * *_New_* cython `0.29.19`
+  * *_New_* cooltools `0.3.2`
+  * *_New_* fanc `0.8.30`
+  * *_Removed_* r-markdown
+
+### `Fixed`
+
+* Fix error in doc for Arima kit usage
+* Sort output of `get_valid_interaction` process as the input files of `remove_duplicates`
+are expected to be sorted (sort -m)
 
 ### `Deprecated`
+
+* Command line options converted to `camel_case`:
+  * `--skipMaps` > `--skip_maps`
+  * `--skipIce` > `--skip_ice`
+  * `--skipCool` > `--skip_cool`
+  * `--skipMultiQC` > `--skip_multiqc`
+  * `--saveReference` > `--save_reference`
+  * `--saveAlignedIntermediates` > `--save_aligned_intermediates`
+  * `--saveInteractionBAM` > `--save_interaction_bam`
+
+## v1.1.1 - 2020-04-02
+
+### `Fixed`
+
+* Fix bug in tag. Remove '['
+
+## v1.1.0 - 2019-10-15
+
+### `Added`
+
+* Update hicpro2higlass with `-p` parameter
+* Support 'N' base motif in restriction/ligation sites
+* Support multiple restriction enzymes/ligation sites (comma separated) ([#31](https://github.com/nf-core/hic/issues/31))
+* Add `--saveInteractionBAM` option
+* Add DOI ([#29](https://github.com/nf-core/hic/issues/29))
+* Update manual ([#28](https://github.com/nf-core/hic/issues/28))
+
+### `Fixed`
+
+* Fix bug for reads extension `_1`/`_2` ([#30](https://github.com/nf-core/hic/issues/30))
+
+## v1.0 - 2019-05-06
+
+Initial release of nf-core/hic, created with the [nf-core](https://nf-co.re/) template.
+
+### `Added`
+
+First version of nf-core Hi-C pipeline which is a Nextflow implementation of
+the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/).
+Note that not all HiC-Pro functionalities are implemented yet.
+The current version supports most protocols, including Hi-C, in situ Hi-C,
+DNase Hi-C, Micro-C, Capture-C and HiChIP data.
+
+In summary, this version allows:
+
+* Automatic detection and generation of annotation files based on iGenomes
+if not provided.
+* Two-step alignment of raw sequencing reads
+* Read filtering and detection of valid interaction products
+* Generation of raw contact matrices for a set of resolutions
+* Normalization of the contact maps using the ICE algorithm
+* Generation of a cooler file for visualization on [higlass](https://higlass.io/)
+* Quality report based on HiC-Pro MultiQC module
diff --git a/CITATIONS.md b/CITATIONS.md
index b5ba945a42bf069488594e576beba9fa95929c2f..bf9ffcab5851396e0ab201225ccdbd439ba39684 100644
--- a/CITATIONS.md
+++ b/CITATIONS.md
@@ -1,10 +1,10 @@
 # nf-core/hic: Citations
 
 ## [HiC-Pro](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
 
 > Servant N, Varoquaux N, Lajoie BR, Viara E, Chen C, Vert JP, Dekker J, Heard E, Barillot E. Genome Biology 2015, 16:259 doi: [10.1186/s13059-015-0831-x](https://dx.doi.org/10.1186/s13059-015-0831-x)
 
-
 ## [nf-core](https://pubmed.ncbi.nlm.nih.gov/32055031/)
 
 > Ewels PA, Peltzer A, Fillinger S, Patel H, Alneberg J, Wilm A, Garcia MU, Di Tommaso P, Nahnsen S. The nf-core framework for community-curated bioinformatics pipelines. Nat Biotechnol. 2020 Mar;38(3):276-278. doi: 10.1038/s41587-020-0439-x. PubMed PMID: 32055031.
@@ -15,23 +15,27 @@
 
 ## Pipeline tools
 
-* [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)
+- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)
 
-* [MultiQC](https://www.ncbi.nlm.nih.gov/pubmed/27312411/)
-    > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924.
+- [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/)
+  > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924.
 
 ## Software packaging/containerisation tools
 
-* [Anaconda](https://anaconda.com)
-    > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web.
+- [Anaconda](https://anaconda.com)
+
+  > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web.
+
+- [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/)
+
+  > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506.
+
+- [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/)
 
-* [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/)
-    > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506.
+  > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671.
 
-* [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/)
-    > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671.
+- [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241)
 
-* [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241)
+- [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/)
+  > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675.
 
-* [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/)
-    > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675.
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 05547b6ad9405aef4f6e7bdf999ac011d085d187..0000000000000000000000000000000000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,20 +0,0 @@
-FROM nfcore/base:1.14
-LABEL authors="Nicolas Servant" \
-      description="Docker image containing all software requirements for the nf-core/hic pipeline"
-
-## Install gcc for pip iced install
-RUN apt-get update && apt-get install -y gcc g++ && apt-get clean -y
-
-# Install the conda environment
-COPY environment.yml /
-RUN conda env create --quiet -f /environment.yml && conda clean -a
-
-# Add conda installation dir to PATH (instead of doing 'conda activate')
-ENV PATH /opt/conda/envs/nf-core-hic-1.3.0/bin:$PATH
-
-# Dump the details of the installed packages to a file for posterity
-RUN conda env export --name nf-core-hic-1.3.0 > nf-core-hic-1.3.0.yml
-
-# Instruct R processes to use these empty files instead of clashing with a local version
-RUN touch .Rprofile
-RUN touch .Renviron
diff --git a/README.md b/README.md
index 984759cb1339a8cef364f457a3e6c71652ed1674..7c6a3645fe39fd101c7f5a07dbbf46f312d69dd5 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,10 @@
-# ![nf-core/hic](docs/images/nf-core-hic_logo.png)
+# ![nf-core/hic](docs/images/nf-core-hic_logo_light.png#gh-light-mode-only) ![nf-core/hic](docs/images/nf-core-hic_logo_dark.png#gh-dark-mode-only)
 
 [![GitHub Actions CI Status](https://github.com/nf-core/hic/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/hic/actions?query=workflow%3A%22nf-core+CI%22)
 [![GitHub Actions Linting Status](https://github.com/nf-core/hic/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/hic/actions?query=workflow%3A%22nf-core+linting%22)
 [![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/hic/results)
 [![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)
-
-[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)
+[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.10.3-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)
 [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
 [![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)
 [![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)
@@ -45,23 +44,26 @@ On release, automated continuous integration tests run the pipeline on a full-si
 
 1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)
 
-2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_
+2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.
 
 3. Download the pipeline and test it on a minimal dataset with a single command:
 
-    ```console
-    nextflow run nf-core/hic -profile test,<docker/singularity/podman/shifter/charliecloud/conda/institute>
-    ```
+   ```console
+   nextflow run nf-core/hic -profile test,YOURPROFILE --outdir <OUTDIR>
+   ```
+
+   Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.
 
-    > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
-    > * If you are using `singularity` then the pipeline will auto-detect this and attempt to download the Singularity images directly as opposed to performing a conversion from Docker images. If you are persistently observing issues downloading Singularity images directly due to timeout or network issues then please use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, it is highly recommended to use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to pre-download all of the required containers before running the pipeline and to set the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options to be able to store and re-use the images from a central location for future pipeline runs.
-    > * If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.
+   > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.
+   > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
+   > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.
+   > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.
 
 4. Start running your own analysis!
 
-    ```console
-    nextflow run nf-core/hic -profile <docker/singularity/podman/shifter/charliecloud/conda/institute> --input samplesheet.csv --genome GRCh37 --digestion 'dpnii'
-    ```
+   ```console
+   nextflow run nf-core/hic --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile <docker/singularity/podman/shifter/charliecloud/conda/institute>
+   ```
 
 ## Documentation
 
@@ -79,11 +81,12 @@ For further information or help, don't hesitate to get in touch on the [Slack `#
 
 ## Citations
 
-
 If you use  nf-core/hic for your analysis, please cite it using the following doi: doi: [10.5281/zenodo.2669513](https://doi.org/10.5281/zenodo.2669513)
 
 An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.
 
 You can cite the `nf-core` publication as follows:
 
 > **The nf-core framework for community-curated bioinformatics pipelines.**
diff --git a/assets/email_template.html b/assets/email_template.html
index d207f011f3ab74ac04ace7a4e277c59900cf058a..80d5dac884d9d5032c63f7be02e8bb7c788a51d7 100644
--- a/assets/email_template.html
+++ b/assets/email_template.html
@@ -1,53 +1,111 @@
 <html>
-<head>
-  <meta charset="utf-8">
-  <meta http-equiv="X-UA-Compatible" content="IE=edge">
-  <meta name="viewport" content="width=device-width, initial-scale=1">
+  <head>
+    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
 
-  <meta name="description" content="nf-core/hic: Analysis of Chromosome Conformation Capture data (Hi-C)">
-  <title>nf-core/hic Pipeline Report</title>
-</head>
-<body>
-<div style="font-family: Helvetica, Arial, sans-serif; padding: 30px; max-width: 800px; margin: 0 auto;">
+    <!-- prettier-ignore -->
+    <meta name="description" content="nf-core/hic: Analysis of Chromosome Conformation Capture data (Hi-C)" />
+    <title>nf-core/hic Pipeline Report</title>
+  </head>
+  <body>
+    <div style="font-family: Helvetica, Arial, sans-serif; padding: 30px; max-width: 800px; margin: 0 auto">
+      <img src="cid:nfcorepipelinelogo" />
 
-<img src="cid:nfcorepipelinelogo">
+      <h1>nf-core/hic v${version}</h1>
+      <h2>Run Name: $runName</h2>
 
-<h1>nf-core/hic v${version}</h1>
-<h2>Run Name: $runName</h2>
-
-<% if (!success){
-    out << """
-    <div style="color: #a94442; background-color: #f2dede; border-color: #ebccd1; padding: 15px; margin-bottom: 20px; border: 1px solid transparent; border-radius: 4px;">
-        <h4 style="margin-top:0; color: inherit;">nf-core/hic execution completed unsuccessfully!</h4>
+      <% if (!success){ out << """
+      <div
+        style="
+          color: #a94442;
+          background-color: #f2dede;
+          border-color: #ebccd1;
+          padding: 15px;
+          margin-bottom: 20px;
+          border: 1px solid transparent;
+          border-radius: 4px;
+        "
+      >
+        <h4 style="margin-top: 0; color: inherit">nf-core/hic execution completed unsuccessfully!</h4>
         <p>The exit status of the task that caused the workflow execution to fail was: <code>$exitStatus</code>.</p>
         <p>The full error message was:</p>
-        <pre style="white-space: pre-wrap; overflow: visible; margin-bottom: 0;">${errorReport}</pre>
-    </div>
-    """
-} else {
-    out << """
-    <div style="color: #3c763d; background-color: #dff0d8; border-color: #d6e9c6; padding: 15px; margin-bottom: 20px; border: 1px solid transparent; border-radius: 4px;">
+        <pre style="white-space: pre-wrap; overflow: visible; margin-bottom: 0">${errorReport}</pre>
+      </div>
+      """ } else { out << """
+      <div
+        style="
+          color: #3c763d;
+          background-color: #dff0d8;
+          border-color: #d6e9c6;
+          padding: 15px;
+          margin-bottom: 20px;
+          border: 1px solid transparent;
+          border-radius: 4px;
+        "
+      >
         nf-core/hic execution completed successfully!
-    </div>
-    """
-}
-%>
+      </div>
+      """ } %>
 
-<p>The workflow was completed at <strong>$dateComplete</strong> (duration: <strong>$duration</strong>)</p>
-<p>The command used to launch the workflow was as follows:</p>
-<pre style="white-space: pre-wrap; overflow: visible; background-color: #ededed; padding: 15px; border-radius: 4px; margin-bottom:30px;">$commandLine</pre>
+      <p>The workflow was completed at <strong>$dateComplete</strong> (duration: <strong>$duration</strong>)</p>
+      <p>The command used to launch the workflow was as follows:</p>
+      <pre
+        style="
+          white-space: pre-wrap;
+          overflow: visible;
+          background-color: #ededed;
+          padding: 15px;
+          border-radius: 4px;
+          margin-bottom: 30px;
+        "
+      >
+$commandLine</pre
+      >
 
-<h3>Pipeline Configuration:</h3>
-<table style="width:100%; max-width:100%; border-spacing: 0; border-collapse: collapse; border:0; margin-bottom: 30px;">
-    <tbody style="border-bottom: 1px solid #ddd;">
-        <% out << summary.collect{ k,v -> "<tr><th style='text-align:left; padding: 8px 0; line-height: 1.42857143; vertical-align: top; border-top: 1px solid #ddd;'>$k</th><td style='text-align:left; padding: 8px; line-height: 1.42857143; vertical-align: top; border-top: 1px solid #ddd;'><pre style='white-space: pre-wrap; overflow: visible;'>$v</pre></td></tr>" }.join("\n") %>
-    </tbody>
-</table>
+      <h3>Pipeline Configuration:</h3>
+      <table
+        style="
+          width: 100%;
+          max-width: 100%;
+          border-spacing: 0;
+          border-collapse: collapse;
+          border: 0;
+          margin-bottom: 30px;
+        "
+      >
+        <tbody style="border-bottom: 1px solid #ddd">
+          <% out << summary.collect{ k,v -> "
+          <tr>
+            <th
+              style="
+                text-align: left;
+                padding: 8px 0;
+                line-height: 1.42857143;
+                vertical-align: top;
+                border-top: 1px solid #ddd;
+              "
+            >
+              $k
+            </th>
+            <td
+              style="
+                text-align: left;
+                padding: 8px;
+                line-height: 1.42857143;
+                vertical-align: top;
+                border-top: 1px solid #ddd;
+              "
+            >
+              <pre style="white-space: pre-wrap; overflow: visible">$v</pre>
+            </td>
+          </tr>
+          " }.join("\n") %>
+        </tbody>
+      </table>
 
-<p>nf-core/hic</p>
-<p><a href="https://github.com/nf-core/hic">https://github.com/nf-core/hic</a></p>
-
-</div>
-
-</body>
+      <p>nf-core/hic</p>
+      <p><a href="https://github.com/nf-core/hic">https://github.com/nf-core/hic</a></p>
+    </div>
+  </body>
 </html>
diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml
index 41468cab303a5894aa01e0823790b22cb44c95cd..e371ccf83ba8d11c96891ddae6c55e120e370667 100644
--- a/assets/multiqc_config.yml
+++ b/assets/multiqc_config.yml
@@ -1,11 +1,11 @@
 report_comment: >
-    This report has been generated by the <a href="https://github.com/nf-core/hic" target="_blank">nf-core/hic</a>
-    analysis pipeline. For information about how to interpret these results, please see the
-    <a href="https://github.com/nf-core/hic" target="_blank">documentation</a>.
+  This report has been generated by the <a href="https://github.com/nf-core/hic" target="_blank">nf-core/hic</a>
+  analysis pipeline. For information about how to interpret these results, please see the
+  <a href="https://nf-co.re/hic" target="_blank">documentation</a>.
 report_section_order:
-    software_versions:
-        order: -1000
-    nf-core-hic-summary:
-        order: -1001
+  software_versions:
+    order: -1000
+  "nf-core-hic-summary":
+    order: -1001
 
 export_plots: true
diff --git a/assets/nf-core-hic_logo.png b/assets/nf-core-hic_logo.png
deleted file mode 100644
index 37461d9a32ae1f73d9090a3a2387cf8997c9a0ed..0000000000000000000000000000000000000000
Binary files a/assets/nf-core-hic_logo.png and /dev/null differ
diff --git a/assets/nf-core-hic_logo_light.png b/assets/nf-core-hic_logo_light.png
new file mode 100644
index 0000000000000000000000000000000000000000..553c19d982fcf9e1e0c9a5f325a8ff290fc087f9
Binary files /dev/null and b/assets/nf-core-hic_logo_light.png differ
diff --git a/assets/schema_input.json b/assets/schema_input.json
index 1c3f0f7be19d1d007f3a5ab083adf91cb5cf72dc..fae1a32c7c094ab593690ee24f0544cf725b5b13 100644
--- a/assets/schema_input.json
+++ b/assets/schema_input.json
@@ -31,9 +31,6 @@
                 ]
             }
         },
-        "required": [
-            "sample",
-            "fastq_1"
-        ]
+        "required": ["sample", "fastq_1"]
     }
 }
diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt
index 6213286c8a3c027fb1fb4496c938fbe3ed7909d3..3c7f7236d7a173bc0d6c1dc6c5cd1078d5a83786 100644
--- a/assets/sendmail_template.txt
+++ b/assets/sendmail_template.txt
@@ -12,9 +12,9 @@ $email_html
 Content-Type: image/png;name="nf-core-hic_logo.png"
 Content-Transfer-Encoding: base64
 Content-ID: <nfcorepipelinelogo>
-Content-Disposition: inline; filename="nf-core-hic_logo.png"
+Content-Disposition: inline; filename="nf-core-hic_logo_light.png"
 
-<% out << new File("$projectDir/assets/nf-core-hic_logo.png").
+<% out << new File("$projectDir/assets/nf-core-hic_logo_light.png").
     bytes.
     encodeBase64().
     toString().
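
The Groovy snippet above inlines the renamed logo by base64-encoding the PNG and exposing it through the `<nfcorepipelinelogo>` Content-ID that the HTML template references via `cid:`. For anyone reproducing the same inline-image trick outside Groovy, a minimal Python sketch using only the standard library might look like this (the file path and subject line are illustrative):

```python
from email.message import EmailMessage
from pathlib import Path

# Build an HTML email whose <img> tag points at a Content-ID...
msg = EmailMessage()
msg["Subject"] = "nf-core/hic pipeline report"
msg.add_alternative('<img src="cid:nfcorepipelinelogo" />', subtype="html")

# ...then attach the PNG as a related part carrying that Content-ID,
# mirroring the base64-encoded inline attachment in the sendmail template.
logo_bytes = Path("assets/nf-core-hic_logo_light.png").read_bytes()
msg.get_payload()[0].add_related(
    logo_bytes, maintype="image", subtype="png", cid="<nfcorepipelinelogo>"
)
```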
diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py
index 73d28f10252c72d7a6ffcec3f4b8742e40330b0e..d70498fccedfe0e83caf99b1b1e0f13d3be45dfa 100755
--- a/bin/check_samplesheet.py
+++ b/bin/check_samplesheet.py
@@ -1,142 +1,248 @@
 #!/usr/bin/env python
 
-# This script is based on the example at: https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
+"""Provide a command line tool to validate and transform tabular samplesheets."""
+
 
-import os
-import sys
-import errno
 import argparse
+import csv
+import logging
+import sys
+from collections import Counter
+from pathlib import Path
 
 
-def parse_args(args=None):
-    Description = "Reformat nf-core/hic samplesheet file and check its contents."
-    Epilog = "Example usage: python check_samplesheet.py <FILE_IN> <FILE_OUT>"
+logger = logging.getLogger()
 
-    parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
-    parser.add_argument("FILE_IN", help="Input samplesheet file.")
-    parser.add_argument("FILE_OUT", help="Output file.")
-    return parser.parse_args(args)
 
+class RowChecker:
+    """
+    Define a service that can validate and transform each given row.
 
-def make_dir(path):
-    if len(path) > 0:
-        try:
-            os.makedirs(path)
-        except OSError as exception:
-            if exception.errno != errno.EEXIST:
-                raise exception
+    Attributes:
+        modified (list): A list of dicts, where each dict corresponds to a previously
+            validated and transformed row. The order of rows is maintained.
 
+    """
 
-def print_error(error, context="Line", context_str=""):
-    error_str = "ERROR: Please check samplesheet -> {}".format(error)
-    if context != "" and context_str != "":
-        error_str = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
-            error, context.strip(), context_str.strip()
+    VALID_FORMATS = (
+        ".fq.gz",
+        ".fastq.gz",
+    )
+
+    def __init__(
+        self,
+        sample_col="sample",
+        first_col="fastq_1",
+        second_col="fastq_2",
+        single_col="single_end",
+        **kwargs,
+    ):
+        """
+        Initialize the row checker with the expected column names.
+
+        Args:
+            sample_col (str): The name of the column that contains the sample name
+                (default "sample").
+            first_col (str): The name of the column that contains the first (or only)
+                FASTQ file path (default "fastq_1").
+            second_col (str): The name of the column that contains the second (if any)
+                FASTQ file path (default "fastq_2").
+            single_col (str): The name of the new column that will be inserted and
+                records whether the sample contains single- or paired-end sequencing
+                reads (default "single_end").
+
+        """
+        super().__init__(**kwargs)
+        self._sample_col = sample_col
+        self._first_col = first_col
+        self._second_col = second_col
+        self._single_col = single_col
+        self._seen = set()
+        self.modified = []
+
+    def validate_and_transform(self, row):
+        """
+        Perform all validations on the given row and insert the read pairing status.
+
+        Args:
+            row (dict): A mapping from column headers (keys) to elements of that row
+                (values).
+
+        """
+        self._validate_sample(row)
+        self._validate_first(row)
+        self._validate_second(row)
+        self._validate_pair(row)
+        self._seen.add((row[self._sample_col], row[self._first_col]))
+        self.modified.append(row)
+
+    def _validate_sample(self, row):
+        """Assert that the sample name exists and convert spaces to underscores."""
+        assert len(row[self._sample_col]) > 0, "Sample input is required."
+        # Sanitize samples slightly.
+        row[self._sample_col] = row[self._sample_col].replace(" ", "_")
+
+    def _validate_first(self, row):
+        """Assert that the first FASTQ entry is non-empty and has the right format."""
+        assert len(row[self._first_col]) > 0, "At least the first FASTQ file is required."
+        self._validate_fastq_format(row[self._first_col])
+
+    def _validate_second(self, row):
+        """Assert that the second FASTQ entry has the right format if it exists."""
+        if len(row[self._second_col]) > 0:
+            self._validate_fastq_format(row[self._second_col])
+
+    def _validate_pair(self, row):
+        """Assert that read pairs have the same file extension. Report pair status."""
+        if row[self._first_col] and row[self._second_col]:
+            row[self._single_col] = False
+            assert (
+                Path(row[self._first_col]).suffixes == Path(row[self._second_col]).suffixes
+            ), "FASTQ pairs must have the same file extensions."
+        else:
+            row[self._single_col] = True
+
+    def _validate_fastq_format(self, filename):
+        """Assert that a given filename has one of the expected FASTQ extensions."""
+        assert any(filename.endswith(extension) for extension in self.VALID_FORMATS), (
+            f"The FASTQ file has an unrecognized extension: {filename}\n"
+            f"It should be one of: {', '.join(self.VALID_FORMATS)}"
         )
-    print(error_str)
-    sys.exit(1)
 
+    def validate_unique_samples(self):
+        """
+        Assert that the combination of sample name and FASTQ filename is unique.
 
-def check_samplesheet(file_in, file_out):
+        In addition to the validation, also rename samples that share a name but
+        list different FASTQ files, appending a ``_T{n}`` run suffix to each.
+
+        """
+        assert len(self._seen) == len(self.modified), "The pair of sample name and FASTQ must be unique."
+        if len({pair[0] for pair in self._seen}) < len(self._seen):
+            counts = Counter(pair[0] for pair in self._seen)
+            seen = Counter()
+            for row in self.modified:
+                sample = row[self._sample_col]
+                seen[sample] += 1
+                if counts[sample] > 1:
+                    row[self._sample_col] = f"{sample}_T{seen[sample]}"
+
+
+def sniff_format(handle):
     """
-    This function checks that the samplesheet follows the following structure:
+    Detect the tabular format.
+
+    Args:
+        handle (text file): A handle to a `text file`_ object. The read position is
+            expected to be at the beginning (index 0).
+
+    Returns:
+        csv.Dialect: The detected tabular format.
 
-    sample,fastq_1,fastq_2
-    SAMPLE_PE,SAMPLE_PE_RUN1_1.fastq.gz,SAMPLE_PE_RUN1_2.fastq.gz
-    SAMPLE_PE,SAMPLE_PE_RUN2_1.fastq.gz,SAMPLE_PE_RUN2_2.fastq.gz
-    SAMPLE_SE,SAMPLE_SE_RUN1_1.fastq.gz,
+    .. _text file:
+        https://docs.python.org/3/glossary.html#term-text-file
 
-    For an example see:
-    https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
     """
+    peek = handle.read(2048)
+    sniffer = csv.Sniffer()
+    if not sniffer.has_header(peek):
+        logger.critical("The given sample sheet does not appear to contain a header.")
+        sys.exit(1)
+    dialect = sniffer.sniff(peek)
+    handle.seek(0)
+    return dialect
 
-    sample_mapping_dict = {}
-    with open(file_in, "r") as fin:
 
-        ## Check header
-        MIN_COLS = 2
-        HEADER = ["sample", "fastq_1", "fastq_2"]
-        header = [x.strip('"') for x in fin.readline().strip().split(",")]
-        if header[: len(HEADER)] != HEADER:
-            print("ERROR: Please check samplesheet header -> {} != {}".format(",".join(header), ",".join(HEADER)))
-            sys.exit(1)
+def check_samplesheet(file_in, file_out):
+    """
+    Check that the tabular samplesheet has the structure expected by nf-core pipelines.
+
+    Validate the general shape of the table, expected columns, and each row. Also add
+    an additional column which records whether one or two FASTQ reads were found.
+
+    Args:
+        file_in (pathlib.Path): The given tabular samplesheet. The format can be either
+            CSV, TSV, or any other format automatically recognized by ``csv.Sniffer``.
+        file_out (pathlib.Path): Where the validated and transformed samplesheet should
+            be created; always in CSV format.
 
-        ## Check sample entries
-        for line in fin:
-            lspl = [x.strip().strip('"') for x in line.strip().split(",")]
-
-            # Check valid number of columns per row
-            if len(lspl) < len(HEADER):
-                print_error(
-                    "Invalid number of columns (minimum = {})!".format(len(HEADER)),
-                    "Line",
-                    line,
-                )
-            num_cols = len([x for x in lspl if x])
-            if num_cols < MIN_COLS:
-                print_error(
-                    "Invalid number of populated columns (minimum = {})!".format(MIN_COLS),
-                    "Line",
-                    line,
-                )
-
-            ## Check sample name entries
-            sample, fastq_1, fastq_2 = lspl[: len(HEADER)]
-            sample = sample.replace(" ", "_")
-            if not sample:
-                print_error("Sample entry has not been specified!", "Line", line)
-
-            ## Check FastQ file extension
-            for fastq in [fastq_1, fastq_2]:
-                if fastq:
-                    if fastq.find(" ") != -1:
-                        print_error("FastQ file contains spaces!", "Line", line)
-                    if not fastq.endswith(".fastq.gz") and not fastq.endswith(".fq.gz"):
-                        print_error(
-                            "FastQ file does not have extension '.fastq.gz' or '.fq.gz'!",
-                            "Line",
-                            line,
-                        )
-
-            ## Auto-detect paired-end/single-end
-            sample_info = []  ## [single_end, fastq_1, fastq_2]
-            if sample and fastq_1 and fastq_2:  ## Paired-end short reads
-                sample_info = ["0", fastq_1, fastq_2]
-            elif sample and fastq_1 and not fastq_2:  ## Single-end short reads
-                sample_info = ["1", fastq_1, fastq_2]
-            else:
-                print_error("Invalid combination of columns provided!", "Line", line)
-
-            ## Create sample mapping dictionary = { sample: [ single_end, fastq_1, fastq_2 ] }
-            if sample not in sample_mapping_dict:
-                sample_mapping_dict[sample] = [sample_info]
-            else:
-                if sample_info in sample_mapping_dict[sample]:
-                    print_error("Samplesheet contains duplicate rows!", "Line", line)
-                else:
-                    sample_mapping_dict[sample].append(sample_info)
-
-    ## Write validated samplesheet with appropriate columns
-    if len(sample_mapping_dict) > 0:
-        out_dir = os.path.dirname(file_out)
-        make_dir(out_dir)
-        with open(file_out, "w") as fout:
-            fout.write(",".join(["sample", "single_end", "fastq_1", "fastq_2"]) + "\n")
-            for sample in sorted(sample_mapping_dict.keys()):
-
-                ## Check that multiple runs of the same sample are of the same datatype
-                if not all(x[0] == sample_mapping_dict[sample][0][0] for x in sample_mapping_dict[sample]):
-                    print_error("Multiple runs of a sample must be of the same datatype!", "Sample: {}".format(sample))
-
-                for idx, val in enumerate(sample_mapping_dict[sample]):
-                    fout.write(",".join(["{}_T{}".format(sample, idx + 1)] + val) + "\n")
-    else:
-        print_error("No entries to process!", "Samplesheet: {}".format(file_in))
-
-
-def main(args=None):
-    args = parse_args(args)
-    check_samplesheet(args.FILE_IN, args.FILE_OUT)
+    Example:
+        This function checks that the samplesheet follows the following structure,
+        see also the `viral recon samplesheet`_::
+
+            sample,fastq_1,fastq_2
+            SAMPLE_PE,SAMPLE_PE_RUN1_1.fastq.gz,SAMPLE_PE_RUN1_2.fastq.gz
+            SAMPLE_PE,SAMPLE_PE_RUN2_1.fastq.gz,SAMPLE_PE_RUN2_2.fastq.gz
+            SAMPLE_SE,SAMPLE_SE_RUN1_1.fastq.gz,
+
+    .. _viral recon samplesheet:
+        https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv
+
+    """
+    required_columns = {"sample", "fastq_1", "fastq_2"}
+    # See https://docs.python.org/3.9/library/csv.html#id3 to read up on `newline=""`.
+    with file_in.open(newline="") as in_handle:
+        reader = csv.DictReader(in_handle, dialect=sniff_format(in_handle))
+        # Validate the existence of the expected header columns.
+        if not required_columns.issubset(reader.fieldnames):
+            logger.critical(f"The sample sheet **must** contain the column headers: {', '.join(required_columns)}.")
+            sys.exit(1)
+        # Validate each row.
+        checker = RowChecker()
+        for i, row in enumerate(reader):
+            try:
+                checker.validate_and_transform(row)
+            except AssertionError as error:
+                logger.critical(f"{str(error)} On line {i + 2}.")
+                sys.exit(1)
+        checker.validate_unique_samples()
+    header = list(reader.fieldnames)
+    header.insert(1, "single_end")
+    # See https://docs.python.org/3.9/library/csv.html#id3 to read up on `newline=""`.
+    with file_out.open(mode="w", newline="") as out_handle:
+        writer = csv.DictWriter(out_handle, header, delimiter=",")
+        writer.writeheader()
+        for row in checker.modified:
+            writer.writerow(row)
+
+
+def parse_args(argv=None):
+    """Define and immediately parse command line arguments."""
+    parser = argparse.ArgumentParser(
+        description="Validate and transform a tabular samplesheet.",
+        epilog="Example: python check_samplesheet.py samplesheet.csv samplesheet.valid.csv",
+    )
+    parser.add_argument(
+        "file_in",
+        metavar="FILE_IN",
+        type=Path,
+        help="Tabular input samplesheet in CSV or TSV format.",
+    )
+    parser.add_argument(
+        "file_out",
+        metavar="FILE_OUT",
+        type=Path,
+        help="Transformed output samplesheet in CSV format.",
+    )
+    parser.add_argument(
+        "-l",
+        "--log-level",
+        help="The desired log level (default WARNING).",
+        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"),
+        default="WARNING",
+    )
+    return parser.parse_args(argv)
+
+
+def main(argv=None):
+    """Coordinate argument parsing and program execution."""
+    args = parse_args(argv)
+    logging.basicConfig(level=args.log_level, format="[%(levelname)s] %(message)s")
+    if not args.file_in.is_file():
+        logger.error(f"The given input file {args.file_in} was not found!")
+        sys.exit(2)
+    args.file_out.parent.mkdir(parents=True, exist_ok=True)
+    check_samplesheet(args.file_in, args.file_out)
 
 
 if __name__ == "__main__":
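
As a quick sanity check of the rewritten validator, the `RowChecker` can be exercised directly. This is a sketch that assumes the script is importable as a module (e.g. copied next to a test file); the sample values are made up:

```python
from check_samplesheet import RowChecker  # assumes the script is on the Python path

checker = RowChecker()
row = {"sample": "WT REP1", "fastq_1": "wt_R1.fastq.gz", "fastq_2": "wt_R2.fastq.gz"}
checker.validate_and_transform(row)  # sanitises the name and flags the pairing status
checker.validate_unique_samples()
print(checker.modified)
# [{'sample': 'WT_REP1', 'fastq_1': 'wt_R1.fastq.gz', 'fastq_2': 'wt_R2.fastq.gz', 'single_end': False}]
```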
diff --git a/bin/markdown_to_html.py b/bin/markdown_to_html.py
deleted file mode 100755
index a26d1ff5e6de3c09385760e76cc40f11a512b3a4..0000000000000000000000000000000000000000
--- a/bin/markdown_to_html.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-import argparse
-import markdown
-import os
-import sys
-import io
-
-
-def convert_markdown(in_fn):
-    input_md = io.open(in_fn, mode="r", encoding="utf-8").read()
-    html = markdown.markdown(
-        "[TOC]\n" + input_md,
-        extensions=["pymdownx.extra", "pymdownx.b64", "pymdownx.highlight", "pymdownx.emoji", "pymdownx.tilde", "toc"],
-        extension_configs={
-            "pymdownx.b64": {"base_path": os.path.dirname(in_fn)},
-            "pymdownx.highlight": {"noclasses": True},
-            "toc": {"title": "Table of Contents"},
-        },
-    )
-    return html
-
-
-def wrap_html(contents):
-    header = """<!DOCTYPE html><html>
-    <head>
-        <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
-        <style>
-            body {
-              font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";
-              padding: 3em;
-              margin-right: 350px;
-              max-width: 100%;
-            }
-            .toc {
-              position: fixed;
-              right: 20px;
-              width: 300px;
-              padding-top: 20px;
-              overflow: scroll;
-              height: calc(100% - 3em - 20px);
-            }
-            .toctitle {
-              font-size: 1.8em;
-              font-weight: bold;
-            }
-            .toc > ul {
-              padding: 0;
-              margin: 1rem 0;
-              list-style-type: none;
-            }
-            .toc > ul ul { padding-left: 20px; }
-            .toc > ul > li > a { display: none; }
-            img { max-width: 800px; }
-            pre {
-              padding: 0.6em 1em;
-            }
-            h2 {
-
-            }
-        </style>
-    </head>
-    <body>
-    <div class="container">
-    """
-    footer = """
-    </div>
-    </body>
-    </html>
-    """
-    return header + contents + footer
-
-
-def parse_args(args=None):
-    parser = argparse.ArgumentParser()
-    parser.add_argument("mdfile", type=argparse.FileType("r"), nargs="?", help="File to convert. Defaults to stdin.")
-    parser.add_argument(
-        "-o", "--out", type=argparse.FileType("w"), default=sys.stdout, help="Output file name. Defaults to stdout."
-    )
-    return parser.parse_args(args)
-
-
-def main(args=None):
-    args = parse_args(args)
-    converted_md = convert_markdown(args.mdfile.name)
-    html = wrap_html(converted_md)
-    args.out.write(html)
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py
deleted file mode 100755
index 322c97c6fc74194e352853b6f5c8f33827a1cbd9..0000000000000000000000000000000000000000
--- a/bin/scrape_software_versions.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-import os
-
-results = {}
-version_files = [x for x in os.listdir(".") if x.endswith(".version.txt")]
-for version_file in version_files:
-
-    software = version_file.replace(".version.txt", "")
-    if software == "pipeline":
-        software = "nf-core/hic"
-
-    with open(version_file) as fin:
-        version = fin.read().strip()
-    results[software] = version
-
-# Dump to YAML
-print(
-    """
-id: 'software_versions'
-section_name: 'nf-core/hic Software Versions'
-section_href: 'https://github.com/nf-core/hic'
-plot_type: 'html'
-description: 'are collected at run time from the software output.'
-data: |
-    <dl class="dl-horizontal">
-"""
-)
-for k, v in sorted(results.items()):
-    print("        <dt>{}</dt><dd><samp>{}</samp></dd>".format(k, v))
-print("    </dl>")
-
-# Write out as tsv file:
-with open("software_versions.tsv", "w") as f:
-    for k, v in sorted(results.items()):
-        f.write("{}\t{}\n".format(k, v))
diff --git a/conf/base.config b/conf/base.config
index 7d29ffef7f7f03cc82e52b19ce8abe9fb3935330..96e174215898ee824c5a09641e864d2659e7ab40 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -1,7 +1,7 @@
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     nf-core/hic Nextflow base config file
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     A 'blank slate' config file, appropriate for general use on most high performance
     compute environments. Assumes that all software is installed and available on
     the PATH. Runs in `local` mode - all jobs will be run on the logged in environment.
@@ -26,12 +26,12 @@ process {
     // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
     withLabel:process_low {
         cpus   = { check_max( 2     * task.attempt, 'cpus'    ) }
-        memory = { check_max( 4.GB * task.attempt, 'memory'  ) }
+        memory = { check_max( 4.GB  * task.attempt, 'memory'  ) }
         time   = { check_max( 4.h   * task.attempt, 'time'    ) }
     }
     withLabel:process_medium {
         cpus   = { check_max( 6     * task.attempt, 'cpus'    ) }
-        memory = { check_max( 8.GB * task.attempt, 'memory'  ) }
+        memory = { check_max( 8.GB  * task.attempt, 'memory'  ) }
         time   = { check_max( 8.h   * task.attempt, 'time'    ) }
     }
     withLabel:process_high {
@@ -52,4 +52,7 @@ process {
         errorStrategy = 'retry'
         maxRetries    = 2
     }
+    withName:CUSTOM_DUMPSOFTWAREVERSIONS {
+        cache = false
+    }
 }
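
The `check_max( 8.GB * task.attempt, 'memory' )` pattern above scales each request linearly with the retry attempt, while `check_max` (defined in the pipeline's top-level configuration) caps the result at the configured `--max_memory`/`--max_cpus`/`--max_time` limits. A rough Python sketch of that behaviour, with illustrative numbers:

```python
def check_max(requested_gb, max_gb):
    """Cap a scaled resource request at the configured maximum (sketch only)."""
    return min(requested_gb, max_gb)

# A 'process_medium'-style task asking for 8 GB, retried under a 16 GB cap:
for attempt in (1, 2, 3):
    print(f"attempt {attempt}: {check_max(8 * attempt, 16)} GB")
# attempt 1: 8 GB, attempt 2: 16 GB, attempt 3: 16 GB
```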
diff --git a/conf/igenomes.config b/conf/igenomes.config
index 1ba2588593f4e1940dc0bf3a3380f0114a71684e..99ae9dba917fd600df00efa6c4760b3e876ae73f 100644
--- a/conf/igenomes.config
+++ b/conf/igenomes.config
@@ -1,11 +1,12 @@
 /*
- * -------------------------------------------------
- *  Nextflow config file for iGenomes paths
- * -------------------------------------------------
- * Defines reference genomes, using iGenome paths
- * Can be used by any config that customises the base
- * path using $params.igenomes_base / --igenomes_base
- */
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Nextflow config file for iGenomes paths
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Defines reference genomes using iGenome paths.
+    Can be used by any config that customises the base path using:
+        $params.igenomes_base / --igenomes_base
+----------------------------------------------------------------------------------------
+*/
 
 params {
   // illumina iGenomes reference file paths
diff --git a/conf/modules.config b/conf/modules.config
index a934ac12d19f93e090e323e3d5bff585b878d1db..0e2268c54e7909641ca6de15910e777fa9eb3c4d 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -278,4 +278,4 @@ process {
     ]
     ext.args = '--correctForMultipleTesting fdr'
   }
-}
\ No newline at end of file
+}
diff --git a/conf/test.config b/conf/test.config
index faebd8bedf252ccb8b46e54ae8338aaf304ba682..3aad49642075b8e7f154bf84d0d0bb4be48e46ad 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -1,11 +1,14 @@
 /*
- * -------------------------------------------------
- *  Nextflow config file for running tests
- * -------------------------------------------------
- * Defines bundled input files and everything required
- * to run a fast and simple test. Use as follows:
- *   nextflow run nf-core/hic -profile test,<docker/singularity>
- */
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Nextflow config file for running minimal tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Defines input files and everything required to run a fast and simple pipeline test.
+
+    Use as follows:
+        nextflow run nf-core/hic -profile test,<docker/singularity> --outdir <OUTDIR>
+
+----------------------------------------------------------------------------------------
+*/
 
 params {
   config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
diff --git a/conf/test_full.config b/conf/test_full.config
index 749878333105099f6ec556853fa2b404e1a28cbb..a9a8183f5a25829e8f323446234ac89ab6dc4353 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -1,12 +1,11 @@
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Nextflow config file for running full-size tests
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Defines input files and everything required to run a full size pipeline test.
 
     Use as follows:
-        nextflow run nf-core/hic -profile test_full,<docker/singularity>
-
+        nextflow run nf-core/hic -profile test_full,<docker/singularity> --outdir <OUTDIR>
 ----------------------------------------------------------------------------------------
 */
 
diff --git a/docs/README.md b/docs/README.md
index 112811ae596b875a989ea77d0671e97ee1125a7c..d673c508157b82e49fb5a9b1390d905734671cd0 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2,9 +2,9 @@
 
 The nf-core/hic documentation is split into the following pages:
 
-* [Usage](usage.md)
-    * An overview of how the pipeline works, how to run it and a description of all of the different command-line flags.
-* [Output](output.md)
-    * An overview of the different results produced by the pipeline and how to interpret them.
+- [Usage](usage.md)
+  - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags.
+- [Output](output.md)
+  - An overview of the different results produced by the pipeline and how to interpret them.
 
 You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re)
diff --git a/docs/images/nf-core-hic_logo.png b/docs/images/nf-core-hic_logo.png
deleted file mode 100644
index 274eb3dc3f3db879c7f3cbc3fd8f49a705a9a3fb..0000000000000000000000000000000000000000
Binary files a/docs/images/nf-core-hic_logo.png and /dev/null differ
diff --git a/docs/images/nf-core-hic_logo_dark.png b/docs/images/nf-core-hic_logo_dark.png
new file mode 100644
index 0000000000000000000000000000000000000000..e245502fd09cb0c22db3025a4e6c9ac4476a77a1
Binary files /dev/null and b/docs/images/nf-core-hic_logo_dark.png differ
diff --git a/docs/images/nf-core-hic_logo_light.png b/docs/images/nf-core-hic_logo_light.png
new file mode 100644
index 0000000000000000000000000000000000000000..5601950978b384469e001f12c14318c51dfb064d
Binary files /dev/null and b/docs/images/nf-core-hic_logo_light.png differ
diff --git a/docs/output.md b/docs/output.md
index 8b3fd0a40579b5ee19f107acdf6f531a8d98702f..2a9d2c23455ce196585f4876d61692d80d24f5e1 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -7,8 +7,7 @@ The directories listed below will be created in the results directory after the
 
 ## Pipeline overview
 
-The pipeline is built using [Nextflow](https://www.nextflow.io/)
-and processes data using the following steps:
+The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps:
 
 * [HiC-Pro](#hicpro)
   * [Reads alignment](#reads-alignment)
@@ -245,33 +244,32 @@ Currently, the pipeline proposes two approaches :
 
 Usually, TADs results are presented as simple BED files, or bigWig files, with the position of boundaries along the genome.
 
-## MultiQC
+### MultiQC
 
-[MultiQC](http://multiqc.info) is a visualisation tool that generates a single
-HTML report summarising all samples in your project. Most of the pipeline QC
-results are visualised in the report and further statistics are available in
-within the report data directory.
+<details markdown="1">
+<summary>Output files</summary>
 
-The pipeline has special steps which allow the software versions used to be
-reported in the MultiQC output for future traceability.
+- `multiqc/`
+  - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser.
+  - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline.
+  - `multiqc_plots/`: directory containing static images from the report in various formats.
 
-**Output files:**
+</details>
 
-* `multiqc/`
-  * `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser.
-  * `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline.
-  * `multiqc_plots/`: directory containing static images from the report in various formats.
+[MultiQC](http://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory.
 
-## Pipeline information
+Results generated by MultiQC collate pipeline QC from supported tools, e.g. FastQC. The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. For more information about how to use MultiQC reports, see <http://multiqc.info>.
 
-[Nextflow](https://www.nextflow.io/docs/latest/tracing.html) provides excellent functionality for generating various reports relevant to the running and execution of the pipeline. This will allow you to troubleshoot errors with the running of the pipeline, and also provide you with other information such as launch commands, run times and resource usage.
+### Pipeline information
+
+<details markdown="1">
+<summary>Output files</summary>
 
-**Output files:**
+- `pipeline_info/`
+  - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
+  - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameters are used when running the pipeline.
+  - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`.
 
-* `pipeline_info/`
-  * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`,
-  `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
-  * Reports generated by the pipeline: `pipeline_report.html`,
-  `pipeline_report.txt` and `software_versions.csv`.
-  * Documentation for interpretation of results in HTML format:
-  `results_description.html`.
+</details>
+
+[Nextflow](https://www.nextflow.io/docs/latest/tracing.html) provides excellent functionality for generating various reports relevant to the running and execution of the pipeline. This will allow you to troubleshoot errors with the running of the pipeline, and also provide you with other information such as launch commands, run times and resource usage.
diff --git a/docs/usage.md b/docs/usage.md
index 800d44713563554482d79b8e165d06514ad921e3..ae72609133239e78ee45e12fcc7f49ac046616ee 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -6,12 +6,56 @@
 
 ## Introduction
 
+## Samplesheet input
+
+You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use the `--input` parameter to specify its location. It has to be a comma-separated file with 3 columns and a header row, as shown in the examples below.
+
+```console
+--input '[path to samplesheet file]'
+```
+
+### Multiple runs of the same sample
+
+The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once, e.g. to increase sequencing depth. The pipeline will concatenate the raw reads before performing any downstream analysis. Below is an example for the same sample sequenced across 3 lanes (a sketch of how repeated sample names are disambiguated follows the example):
+
+```console
+sample,fastq_1,fastq_2
+CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
+CONTROL_REP1,AEG588A1_S1_L003_R1_001.fastq.gz,AEG588A1_S1_L003_R2_001.fastq.gz
+CONTROL_REP1,AEG588A1_S1_L004_R1_001.fastq.gz,AEG588A1_S1_L004_R2_001.fastq.gz
+```
+
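Under the hood, `bin/check_samplesheet.py` (shown earlier in this diff) keeps every row and disambiguates repeated sample names by appending a run suffix. A condensed sketch of its `validate_unique_samples()` renaming, with made-up rows:

```python
from collections import Counter

# Condensed from validate_unique_samples() in bin/check_samplesheet.py:
rows = [{"sample": "CONTROL_REP1"}, {"sample": "CONTROL_REP1"}, {"sample": "CONTROL_REP1"}]
counts = Counter(row["sample"] for row in rows)
seen = Counter()
for row in rows:
    sample = row["sample"]
    seen[sample] += 1
    if counts[sample] > 1:
        row["sample"] = f"{sample}_T{seen[sample]}"
print([row["sample"] for row in rows])
# ['CONTROL_REP1_T1', 'CONTROL_REP1_T2', 'CONTROL_REP1_T3']
```
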
+### Full samplesheet
+
+The pipeline will auto-detect whether a sample is single- or paired-end using the information provided in the samplesheet. The samplesheet can have as many columns as you desire; however, there is a strict requirement for the first 3 columns to match those defined in the table below (a sketch of the detection rule follows the table).
+
+A final samplesheet file consisting of both single- and paired-end data may look something like the one below. This is for 6 samples, where `TREATMENT_REP3` has been sequenced twice.
+
+```console
+sample,fastq_1,fastq_2
+CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
+CONTROL_REP2,AEG588A2_S2_L002_R1_001.fastq.gz,AEG588A2_S2_L002_R2_001.fastq.gz
+CONTROL_REP3,AEG588A3_S3_L002_R1_001.fastq.gz,AEG588A3_S3_L002_R2_001.fastq.gz
+TREATMENT_REP1,AEG588A4_S4_L003_R1_001.fastq.gz,
+TREATMENT_REP2,AEG588A5_S5_L003_R1_001.fastq.gz,
+TREATMENT_REP3,AEG588A6_S6_L003_R1_001.fastq.gz,
+TREATMENT_REP3,AEG588A6_S6_L004_R1_001.fastq.gz,
+```
+
+| Column    | Description                                                                                                                                                                            |
+| --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `sample`  | Custom sample name. This entry will be identical for multiple sequencing libraries/runs from the same sample. Spaces in sample names are automatically converted to underscores (`_`). |
+| `fastq_1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz".                                                             |
+| `fastq_2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz".                                                             |
+
+An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline.
+
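The auto-detection rule mentioned above is simply the presence or absence of a `fastq_2` value in each row. A minimal sketch of that check (the `read_pairing` helper is illustrative, not part of the pipeline):

```python
import csv

def read_pairing(samplesheet_path):
    """Yield (sample, single_end) per row, mirroring the pipeline's pairing rule."""
    with open(samplesheet_path, newline="") as handle:
        for row in csv.DictReader(handle):
            yield row["sample"], not row.get("fastq_2")  # empty fastq_2 -> single-end

for sample, single_end in read_pairing("samplesheet.csv"):
    print(sample, "single-end" if single_end else "paired-end")
```
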
 ## Running the pipeline
 
 The typical command for running the pipeline is as follows:
 
-```bash
-nextflow run nf-core/hic --input '*_R{1,2}.fastq.gz' -profile docker
+```console
+nextflow run nf-core/hic --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile docker
 ```
 
 This will launch the pipeline with the `docker` configuration profile.
@@ -19,10 +63,10 @@ See below for more information about profiles.
 
 Note that the pipeline will create the following files in your working directory:
 
-```bash
-work            # Directory containing the nextflow working files
-results         # Finished results (configurable, see below)
-.nextflow_log   # Log file from Nextflow
+```console
+work                # Directory containing the nextflow working files
+<OUTDIR>            # Finished results in specified location (defined with --outdir)
+.nextflow_log       # Log file from Nextflow
 # Other nextflow hidden files, e.g. history of pipeline runs and old logs.
 ```
 
@@ -30,13 +74,13 @@ results         # Finished results (configurable, see below)
 
 When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
 
-```bash
+```console
 nextflow pull nf-core/hic
 ```
 
 ### Reproducibility
 
-It's a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
+It is a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
 
 First, go to the
 [nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find
@@ -65,7 +109,7 @@ fails after three times then the pipeline is stopped.
 Use this parameter to choose a configuration profile. Profiles can give
 configuration presets for different compute environments.
 
-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below.
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io, e.g. [FastQC](https://quay.io/repository/biocontainers/fastqc), except for Singularity, which directly downloads Singularity images hosted over HTTPS by the [Galaxy project](https://depot.galaxyproject.org/singularity/), and Conda, which downloads and installs software locally from [Bioconda](https://bioconda.github.io/).
 
 > We highly recommend the use of Docker or Singularity containers for full
 pipeline reproducibility, however when this is not possible, Conda is also supported.
@@ -87,36 +131,27 @@ If `-profile` is not specified, the pipeline will run locally and
 expect all software to be
 installed and available on the `PATH`. This is _not_ recommended.
 
-* `docker`
-  * A generic configuration profile to be used with [Docker](https://docker.com/)
-  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
-* `singularity`
-  * A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/)
-  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
-* `podman`
-  * A generic configuration profile to be used with [Podman](https://podman.io/)
-  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
-* `shifter`
-  * A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
-  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
-* `charliecloud`
-  * A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
-  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
-* `conda`
-  * Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
-  * A generic configuration profile to be used with [Conda](https://conda.io/docs/)
-  * Pulls most software from [Bioconda](https://bioconda.github.io/)
-* `test`
-  * A profile with a complete configuration for automated testing
-  * Includes links to test data so needs no other parameters
+- `docker`
+  - A generic configuration profile to be used with [Docker](https://docker.com/)
+- `singularity`
+  - A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/)
+- `podman`
+  - A generic configuration profile to be used with [Podman](https://podman.io/)
+- `shifter`
+  - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
+- `charliecloud`
+  - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
+- `conda`
+  - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
+- `test`
+  - A profile with a complete configuration for automated testing
+  - Includes links to test data so needs no other parameters
 
 ### `-resume`
 
-Specify this when restarting a pipeline. Nextflow will used cached results from
-any pipeline steps where the inputs are the same, continuing from where it got
-to previously.
-You can also supply a run name to resume a specific run: `-resume [run-name]`.
-Use the `nextflow log` command to show previous run names.
+Specify this when restarting a pipeline. Nextflow will use cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously. For input to be considered the same, not only the names but also the files' contents must be identical. For more info about this parameter, see [this blog post](https://www.nextflow.io/blog/2019/demystifying-nextflow-resume.html).
+
+You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names.
 
 ### `-c`
 
@@ -124,38 +159,114 @@ Specify the path to a specific config file (this is a core Nextflow command).
 See the [nf-core website documentation](https://nf-co.re/usage/configuration)
 for more information.
 
-#### Custom resource requests
+## Custom configuration
 
-Each step in the pipeline has a default set of requirements for number of CPUs,
-memory and time. For most of the steps in the pipeline, if the job exits with
-an error code of `143` (exceeded requested resources) it will automatically resubmit
-with higher requests (2 x original, then 3 x original). If it still fails after three
-times then the pipeline is stopped.
+### Resource requests
+
+Whilst the default requirements set within the pipeline will hopefully work for most people and with most input data, you may find that you want to customise the compute resources that the pipeline requests. Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with any of the error codes specified [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L18) it will automatically be resubmitted with higher requests (2 x original, then 3 x original). If it still fails after the third attempt then the pipeline execution is stopped.
 
-Whilst these default requirements will hopefully work for most people with most data,
-you may find that you want to customise the compute resources that the pipeline requests.
-You can do this by creating a custom config file. For example, to give the workflow
-process `star` 32GB of memory, you could use the following config:
+For example, if the nf-core/rnaseq pipeline is failing after multiple re-submissions of the `STAR_ALIGN` process due to an exit code of `137` this would indicate that there is an out of memory issue:
+
+```console
+[62/149eb0] NOTE: Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1)
+Error executing process > 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)'
+
+Caused by:
+    Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137)
+
+Command executed:
+    STAR \
+        --genomeDir star \
+        --readFilesIn WT_REP1_trimmed.fq.gz  \
+        --runThreadN 2 \
+        --outFileNamePrefix WT_REP1. \
+        <TRUNCATED>
+
+Command exit status:
+    137
+
+Command output:
+    (empty)
+
+Command error:
+    .command.sh: line 9:  30 Killed    STAR --genomeDir star --readFilesIn WT_REP1_trimmed.fq.gz --runThreadN 2 --outFileNamePrefix WT_REP1. <TRUNCATED>
+Work dir:
+    /home/pipelinetest/work/9d/172ca5881234073e8d76f2a19c88fb
+
+Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run`
+```
+
+To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN).
+We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so, based on the search results, the file we want is `modules/nf-core/software/star/align/main.nf`.
+If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9).
+The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure a subset of processes having similar computing requirements.
+The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB.
+Providing you haven't set any other standard nf-core parameters to **cap** the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline, we can try to bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB.
+The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections.
 
 ```nextflow
 process {
-  withName: star {
-    memory = 32.GB
-  }
+    withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' {
+        memory = 100.GB
+    }
 }
 ```
 
-To find the exact name of a process you wish to modify the compute resources, check the live-status of a nextflow run displayed on your terminal or check the nextflow error for a line like so: `Error executing process > 'bowtie2_end_to_end'`. In this case the name to specify in the custom config file is `bowtie2_end_to_end`.
+> **NB:** We specify the full process name i.e. `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN` in the config file because this takes priority over the short name (`STAR_ALIGN`) and allows existing configuration using the full process name to be correctly overridden.
+>
+> If you get a warning suggesting that the process selector isn't recognised, check that the process name has been specified correctly.
+
+### Updating containers
+
+The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently, it doesn't make sense to re-release nf-core/viralrecon every time a new version of Pangolin is released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`.
 
-See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information.
+1. Check the default version used by the pipeline in the module file for [Pangolin](https://github.com/nf-core/viralrecon/blob/a85d5969f9025409e3618d6c280ef15ce417df65/modules/nf-core/software/pangolin/main.nf#L14-L19)
+2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags)
+3. Create the custom config accordingly:
 
-If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition above). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
+   - For Docker:
+
+     ```nextflow
+     process {
+         withName: PANGOLIN {
+             container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0'
+         }
+     }
+     ```
+
+   - For Singularity:
+
+     ```nextflow
+     process {
+         withName: PANGOLIN {
+             container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0'
+         }
+     }
+     ```
+
+   - For Conda:
+
+     ```nextflow
+     process {
+         withName: PANGOLIN {
+             conda = 'bioconda::pangolin=3.0.5'
+         }
+     }
+     ```
+
+> **NB:** If you wish to periodically update individual tool-specific results (e.g. Pangolin) generated by the pipeline then you must keep the `work/` directory, otherwise the `-resume` ability of the pipeline will be compromised and it will restart from scratch.
+
+### nf-core/configs
+
+In most cases, you will only need to create a custom config as a one-off, but if you and others within your organisation are likely to be running nf-core pipelines regularly with the same settings, it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this, please test that the config file works with your pipeline of choice using the `-c` parameter. You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
+
+See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information about creating your own configuration files.
 
 If you have any questions or issues please send us a message on
 [Slack](https://nf-co.re/join/slack) on the
 [`#configs` channel](https://nfcore.slack.com/channels/configs).
 
-### Running in the background
+## Running in the background
 
 Nextflow handles job submissions and supervises the running jobs.
 The Nextflow process must run until the pipeline is finished.
@@ -169,14 +280,14 @@ session which you can log back into at a later time.
 Some HPC setups also allow you to run Nextflow within a cluster job submitted
 to your job scheduler (from where it submits more jobs).
 
-#### Nextflow memory requirements
+## Nextflow memory requirements
 
 In some cases, the Nextflow Java virtual machines can start to request a
 large amount of memory.
 We recommend adding the following line to your environment to limit this
 (typically in `~/.bashrc` or `~/.bash_profile`):
 
-```bash
+```console
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
 
diff --git a/lib/Headers.groovy b/lib/Headers.groovy
deleted file mode 100644
index 15d1d388006df42e226aea961f0d21dbdabaa8cb..0000000000000000000000000000000000000000
--- a/lib/Headers.groovy
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This file holds several functions used to render the nf-core ANSI header.
- */
-
-class Headers {
-
-    private static Map log_colours(Boolean monochrome_logs) {
-        Map colorcodes = [:]
-        colorcodes['reset']       = monochrome_logs ? '' : "\033[0m"
-        colorcodes['dim']         = monochrome_logs ? '' : "\033[2m"
-        colorcodes['black']       = monochrome_logs ? '' : "\033[0;30m"
-        colorcodes['green']       = monochrome_logs ? '' : "\033[0;32m"
-        colorcodes['yellow']      = monochrome_logs ? '' :  "\033[0;33m"
-        colorcodes['yellow_bold'] = monochrome_logs ? '' : "\033[1;93m"
-        colorcodes['blue']        = monochrome_logs ? '' : "\033[0;34m"
-        colorcodes['purple']      = monochrome_logs ? '' : "\033[0;35m"
-        colorcodes['cyan']        = monochrome_logs ? '' : "\033[0;36m"
-        colorcodes['white']       = monochrome_logs ? '' : "\033[0;37m"
-        colorcodes['red']         = monochrome_logs ? '' : "\033[1;91m"
-        return colorcodes
-    }
-
-    static String dashed_line(monochrome_logs) {
-        Map colors = log_colours(monochrome_logs)
-        return "-${colors.dim}----------------------------------------------------${colors.reset}-"
-    }
-
-    static String nf_core(workflow, monochrome_logs) {
-        Map colors = log_colours(monochrome_logs)
-        String.format(
-            """\n
-            ${dashed_line(monochrome_logs)}
-                                                    ${colors.green},--.${colors.black}/${colors.green},-.${colors.reset}
-            ${colors.blue}        ___     __   __   __   ___     ${colors.green}/,-._.--~\'${colors.reset}
-            ${colors.blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${colors.yellow}}  {${colors.reset}
-            ${colors.blue}  | \\| |       \\__, \\__/ |  \\ |___     ${colors.green}\\`-._,-`-,${colors.reset}
-                                                    ${colors.green}`._,._,\'${colors.reset}
-            ${colors.purple}  ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset}
-            ${dashed_line(monochrome_logs)}
-            """.stripIndent()
-        )
-    }
-}
diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy
old mode 100644
new mode 100755
index 8d6920dd645644e70e8bce260022e7e70be97788..b3d092f8090902661a13b951bb251af4645f8d80
--- a/lib/NfcoreSchema.groovy
+++ b/lib/NfcoreSchema.groovy
@@ -27,7 +27,7 @@ class NfcoreSchema {
     /* groovylint-disable-next-line UnusedPrivateMethodParameter */
     public static void validateParameters(workflow, params, log, schema_filename='nextflow_schema.json') {
         def has_error = false
-        //=====================================================================//
+        //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
         // Check for nextflow core params and unexpected params
         def json = new File(getSchemaPath(workflow, schema_filename=schema_filename)).text
         def Map schemaParams = (Map) new JsonSlurper().parseText(json).get('definitions')
@@ -105,9 +105,13 @@ class NfcoreSchema {
 
         // Collect expected parameters from the schema
         def expectedParams = []
+        def enums = [:]
         for (group in schemaParams) {
             for (p in group.value['properties']) {
                 expectedParams.push(p.key)
+                if (group.value['properties'][p.key].containsKey('enum')) {
+                    enums[p.key] = group.value['properties'][p.key]['enum']
+                }
             }
         }
 
@@ -131,7 +135,7 @@ class NfcoreSchema {
             }
         }
 
-        //=====================================================================//
+        //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~//
         // Validate parameters against the schema
         InputStream input_stream = new File(getSchemaPath(workflow, schema_filename=schema_filename)).newInputStream()
         JSONObject raw_schema = new JSONObject(new JSONTokener(input_stream))
@@ -155,7 +159,7 @@ class NfcoreSchema {
             println ''
             log.error 'ERROR: Validation of pipeline parameters failed!'
             JSONObject exceptionJSON = e.toJSON()
-            printExceptions(exceptionJSON, params_json, log)
+            printExceptions(exceptionJSON, params_json, log, enums)
             println ''
             has_error = true
         }
@@ -202,7 +206,7 @@ class NfcoreSchema {
                 }
                 def type = '[' + group_params.get(param).type + ']'
                 def description = group_params.get(param).description
-                def defaultValue = group_params.get(param).default ? " [default: " + group_params.get(param).default.toString() + "]" : ''
+                def defaultValue = group_params.get(param).default != null ? " [default: " + group_params.get(param).default.toString() + "]" : ''
                 def description_default = description + colors.dim + defaultValue + colors.reset
                 // Wrap long description texts
                 // Loosely based on https://dzone.com/articles/groovy-plain-text-word-wrap
@@ -260,13 +264,12 @@ class NfcoreSchema {
 
         // Get pipeline parameters defined in JSON Schema
         def Map params_summary = [:]
-        def blacklist  = ['hostnames']
         def params_map = paramsLoad(getSchemaPath(workflow, schema_filename=schema_filename))
         for (group in params_map.keySet()) {
             def sub_params = new LinkedHashMap()
             def group_params = params_map.get(group)  // This gets the parameters of that particular group
             for (param in group_params.keySet()) {
-                if (params.containsKey(param) && !blacklist.contains(param)) {
+                if (params.containsKey(param)) {
                     def params_value = params.get(param)
                     def schema_value = group_params.get(param).default
                     def param_type   = group_params.get(param).type
@@ -330,7 +333,7 @@ class NfcoreSchema {
     //
     // Loop over nested exceptions and print the causingException
     //
-    private static void printExceptions(ex_json, params_json, log) {
+    private static void printExceptions(ex_json, params_json, log, enums, limit=5) {
         def causingExceptions = ex_json['causingExceptions']
         if (causingExceptions.length() == 0) {
             def m = ex_json['message'] =~ /required key \[([^\]]+)\] not found/
@@ -346,11 +349,20 @@ class NfcoreSchema {
             else {
                 def param = ex_json['pointerToViolation'] - ~/^#\//
                 def param_val = params_json[param].toString()
-                log.error "* --${param}: ${ex_json['message']} (${param_val})"
+                if (enums.containsKey(param)) {
+                    def error_msg = "* --${param}: '${param_val}' is not a valid choice (Available choices"
+                    if (enums[param].size() > limit) {
+                        log.error "${error_msg} (${limit} of ${enums[param].size()}): ${enums[param][0..limit-1].join(', ')}, ... )"
+                    } else {
+                        log.error "${error_msg}: ${enums[param].join(', ')})"
+                    }
+                } else {
+                    log.error "* --${param}: ${ex_json['message']} (${param_val})"
+                }
             }
         }
         for (ex in causingExceptions) {
-            printExceptions(ex, params_json, log)
+            printExceptions(ex, params_json, log, enums)
         }
     }
 
diff --git a/lib/NfcoreTemplate.groovy b/lib/NfcoreTemplate.groovy
index 44551e0a3521cfd9f02501d0a4a1db460dd1ff07..2fc0a9b9b61d85455653e2abbc91391f8b8606b0 100755
--- a/lib/NfcoreTemplate.groovy
+++ b/lib/NfcoreTemplate.groovy
@@ -19,27 +19,16 @@ class NfcoreTemplate {
     }
 
     //
-    // Check params.hostnames
+    // Warn if a -profile or Nextflow config has not been provided to run the pipeline
     //
-    public static void hostName(workflow, params, log) {
-        Map colors = logColours(params.monochrome_logs)
-        if (params.hostnames) {
-            try {
-                def hostname = "hostname".execute().text.trim()
-                params.hostnames.each { prof, hnames ->
-                    hnames.each { hname ->
-                        if (hostname.contains(hname) && !workflow.profile.contains(prof)) {
-                            log.info "=${colors.yellow}====================================================${colors.reset}=\n" +
-                                "${colors.yellow}WARN: You are running with `-profile $workflow.profile`\n" +
-                                "      but your machine hostname is ${colors.white}'$hostname'${colors.reset}.\n" +
-                                "      ${colors.yellow_bold}Please use `-profile $prof${colors.reset}`\n" +
-                                "=${colors.yellow}====================================================${colors.reset}="
-                        }
-                    }
-                }
-            } catch (Exception e) {
-                log.warn "[$workflow.manifest.name] Could not determine 'hostname' - skipping check. Reason: ${e.message}."
-            }
+    public static void checkConfigProvided(workflow, log) {
+        if (workflow.profile == 'standard' && workflow.configFiles.size() <= 1) {
+            log.warn "[$workflow.manifest.name] You are attempting to run the pipeline without any custom configuration!\n\n" +
+                    "This will be dependent on your local compute environment but can be achieved via one or more of the following:\n" +
+                    "   (1) Using an existing pipeline profile e.g. `-profile docker` or `-profile singularity`\n" +
+                    "   (2) Using an existing nf-core/configs for your Institution e.g. `-profile crick` or `-profile uppmax`\n" +
+                    "   (3) Using your own local custom config e.g. `-c /path/to/your/custom.config`\n\n" +
+                    "Please refer to the quick start section and usage docs for the pipeline.\n "
         }
     }
 
@@ -168,7 +157,6 @@ class NfcoreTemplate {
                 log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed successfully, but with errored process(es) ${colors.reset}-"
             }
         } else {
-            hostName(workflow, params, log)
             log.info "-${colors.purple}[$workflow.manifest.name]${colors.red} Pipeline completed with errors${colors.reset}-"
         }
     }
diff --git a/lib/Utils.groovy b/lib/Utils.groovy
index 18173e98503206c71e7cfc1615bfbfb6202c1198..28567bd70d63ebdae1340a22458cf8bc5a6fecf2 100755
--- a/lib/Utils.groovy
+++ b/lib/Utils.groovy
@@ -29,19 +29,12 @@ class Utils {
         conda_check_failed |= !(channels.indexOf('bioconda') < channels.indexOf('defaults'))
 
         if (conda_check_failed) {
-            log.warn "=============================================================================\n" +
+            log.warn "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" +
                 "  There is a problem with your Conda configuration!\n\n" +
                 "  You will need to set-up the conda-forge and bioconda channels correctly.\n" +
                 "  Please refer to https://bioconda.github.io/user/install.html#set-up-channels\n" +
                 "  NB: The order of the channels matters!\n" +
-                "==================================================================================="
+                "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
         }
     }
-
-    //
-    // Join module args with appropriate spacing
-    //
-    public static String joinModuleArgs(args_list) {
-        return ' ' + args_list.join(' ')
-    }
 }
diff --git a/lib/WorkflowHic.groovy b/lib/WorkflowHic.groovy
index 2c21fc580f1cb2df0a168e3fb32eaaaf7510f9d7..c6945ae2c9956f80f02d4d956b0a9d4f304c3c0f 100755
--- a/lib/WorkflowHic.groovy
+++ b/lib/WorkflowHic.groovy
@@ -56,11 +56,11 @@
     //
     private static void genomeExistsError(params, log) {
         if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) {
-            log.error "=============================================================================\n" +
-                "  Genome '${params.genome}' not found in any config files provided to the pipeline.\n" +
-                "  Currently, the available genome keys are:\n" +
-                "  ${params.genomes.keySet().join(", ")}\n" +
-                "==================================================================================="
+            log.error "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n" +
+                "  Genome '${params.genome}' not found in any config files provided to the pipeline.\n" +
+                "  Currently, the available genome keys are:\n" +
+                "  ${params.genomes.keySet().join(", ")}\n" +
+                "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
             System.exit(1)
         }
     }
diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy
index d7085e909f4863c6112cbfe13aeed085cdad4d2d..cafb54c35236256cfd15b259782498b839a340ed 100755
--- a/lib/WorkflowMain.groovy
+++ b/lib/WorkflowMain.groovy
@@ -60,6 +60,9 @@
         // Print parameter summary log to screen
         log.info paramsSummaryLog(workflow, params, log)
 
+        // Check that a -profile or Nextflow config has been provided to run the pipeline
+        NfcoreTemplate.checkConfigProvided(workflow, log)
+
         // Check that conda channels are set-up correctly
         if (params.enable_conda) {
             Utils.checkCondaChannels(log)
@@ -68,9 +71,6 @@
         // Check AWS batch settings
         NfcoreTemplate.awsBatch(workflow, params)
 
-        // Check the hostnames against configured profiles
-        NfcoreTemplate.hostName(workflow, params, log)
-
         // Check input has been provided
         if (!params.input) {
             log.error "Please provide an input samplesheet to the pipeline e.g. '--input samplesheet.csv'"
diff --git a/main.nf b/main.nf
index 18b7438d72a35fd8082cc3ff27009887d2bf0719..82aaf0f200b77db6710472f821c715376d2e5668 100644
--- a/main.nf
+++ b/main.nf
@@ -1,8 +1,8 @@
 #!/usr/bin/env nextflow
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     nf-core/hic
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Github : https://github.com/nf-core/hic
     Website: https://nf-co.re/hic
     Slack  : https://nfcore.slack.com/channels/hic
@@ -12,26 +12,26 @@
 nextflow.enable.dsl = 2
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     GENOME PARAMETER VALUES
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 params.fasta = WorkflowMain.getGenomeAttribute(params, 'fasta')
 params.bwt2_index = WorkflowMain.getGenomeAttribute(params, 'bowtie2')
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     VALIDATE & PRINT PARAMETER SUMMARY
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 WorkflowMain.initialise(workflow, params, log)
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     NAMED WORKFLOW FOR PIPELINE
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 include { HIC } from './workflows/hic'
@@ -44,9 +44,9 @@ workflow NFCORE_HIC {
 }
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     RUN ALL WORKFLOWS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 //
@@ -58,7 +58,7 @@ workflow {
 }
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     THE END
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
diff --git a/modules.json b/modules.json
index 53a88ac19cc1c065bb8d6d2f1b1b29040d5ddc44..4a090fd6e6f1a31b03ba0a8e2fbe9ea1f2633747 100644
--- a/modules.json
+++ b/modules.json
@@ -23,4 +23,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/local/samplesheet_check.nf b/modules/local/samplesheet_check.nf
index f95f0032b1db11aa92ccac4295b3342d3cb68c5f..ac33a0e3231570c9b4c102dfd30e22435f32042d 100644
--- a/modules/local/samplesheet_check.nf
+++ b/modules/local/samplesheet_check.nf
@@ -1,25 +1,28 @@
-// Import generic module functions
 
 process SAMPLESHEET_CHECK {
     tag "$samplesheet"
 
     conda (params.enable_conda ? "conda-forge::python=3.8.3" : null)
-    if (workflow.containerEngine == 'singularity' && !params.singularity_pull_docker_container) {
-        container "https://depot.galaxyproject.org/singularity/python:3.8.3"
-    } else {
-        container "quay.io/biocontainers/python:3.8.3"
-    }
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/python:3.8.3' :
+        'quay.io/biocontainers/python:3.8.3' }"
 
     input:
     path samplesheet
 
     output:
-    path '*.csv'
+    path '*.csv'       , emit: csv
+    path "versions.yml", emit: versions
 
-    script:
+    script: // This script is bundled with the pipeline, in nf-core/hic/bin/
     """
     check_samplesheet.py \\
         $samplesheet \\
         samplesheet.valid.csv
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(python --version | sed 's/Python //g')
+    END_VERSIONS
     """
 }
diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
index fda9fc3f809f496767975f3df49d0148d98611be..d13903925467e97e353f0a4e6bcf9f6cdb8a3664 100644
--- a/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
+++ b/modules/nf-core/modules/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -51,8 +51,8 @@ versions_this_module["${task.process}"] = {
 }
 
 with open("$versions") as f:
-    versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) #| versions_this_module
-    
+    versions_by_process = yaml.load(f, Loader=yaml.BaseLoader) | versions_this_module
+
 # aggregate versions by the module name (derived from fully-qualified process name)
 versions_by_module = {}
 for process, process_versions in versions_by_process.items():
diff --git a/modules/nf-core/modules/multiqc/main.nf b/modules/nf-core/modules/multiqc/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..1264aac1ebfc902ae6633862472b412cd929656a
--- /dev/null
+++ b/modules/nf-core/modules/multiqc/main.nf
@@ -0,0 +1,31 @@
+process MULTIQC {
+    label 'process_medium'
+
+    conda (params.enable_conda ? 'bioconda::multiqc=1.12' : null)
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/multiqc:1.12--pyhdfd78af_0' :
+        'quay.io/biocontainers/multiqc:1.12--pyhdfd78af_0' }"
+
+    input:
+    path multiqc_files
+
+    output:
+    path "*multiqc_report.html", emit: report
+    path "*_data"              , emit: data
+    path "*_plots"             , optional:true, emit: plots
+    path "versions.yml"        , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    multiqc -f $args .
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/modules/multiqc/meta.yml b/modules/nf-core/modules/multiqc/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6fa891efc2c607fa6e1d081171b1bf2a710443ab
--- /dev/null
+++ b/modules/nf-core/modules/multiqc/meta.yml
@@ -0,0 +1,40 @@
+name: MultiQC
+description: Aggregate results from bioinformatics analyses across many samples into a single report
+keywords:
+  - QC
+  - bioinformatics tools
+  - Beautiful stand-alone HTML report
+tools:
+  - multiqc:
+      description: |
+        MultiQC searches a given directory for analysis logs and compiles a HTML report.
+        It's a general use tool, perfect for summarising the output from numerous bioinformatics tools.
+      homepage: https://multiqc.info/
+      documentation: https://multiqc.info/docs/
+      licence: ["GPL-3.0-or-later"]
+input:
+  - multiqc_files:
+      type: file
+      description: |
+        List of reports / files recognised by MultiQC, for example the html and zip output of FastQC
+output:
+  - report:
+      type: file
+      description: MultiQC report file
+      pattern: "multiqc_report.html"
+  - data:
+      type: dir
+      description: MultiQC data dir
+      pattern: "multiqc_data"
+  - plots:
+      type: file
+      description: Plots created by MultiQC
+      pattern: "*_data"
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+authors:
+  - "@abhi18av"
+  - "@bunop"
+  - "@drpatelh"
diff --git a/nextflow.config b/nextflow.config
index 2cd1618f2231064f95ff654bdd15ad3e37d22de8..edcb3878a573997467f9964f645479fe8aecf62f 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -1,7 +1,7 @@
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     nf-core/hic Nextflow config file
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     Default config options for all compute environments
 ----------------------------------------------------------------------------------------
 */
@@ -105,14 +105,12 @@ params {
     help                       = false
     validate_params            = true
     show_hidden_params         = false
-    schema_ignore_params       = 'genomes,modules'
+    schema_ignore_params       = 'genomes'
     enable_conda               = false
-    singularity_pull_docker_container = false
 
     // Config options
     custom_config_version      = 'master'
     custom_config_base         = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
-    hostnames                  = [:]
     config_profile_description = null
     config_profile_contact     = null
     config_profile_url         = null
@@ -139,12 +137,14 @@ try {
     System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
 }
 
-// Load igenomes.config if required
-if (!params.igenomes_ignore) {
-    includeConfig 'conf/igenomes.config'
-} else {
-    params.genomes = [:]
-}
+// Load nf-core/hic custom profiles from different institutions.
+// Warning: Uncomment only if a pipeline-specific institutional config already exists on nf-core/configs!
+// try {
+//   includeConfig "${params.custom_config_base}/pipeline/hic.config"
+// } catch (Exception e) {
+//   System.err.println("WARNING: Could not load nf-core/config/hic profiles: ${params.custom_config_base}/pipeline/hic.config")
+// }
+
 
 profiles {
     debug { process.beforeScript = 'echo $HOSTNAME' }
@@ -197,11 +197,22 @@ profiles {
     test_full { includeConfig 'conf/test_full.config' }
 }
 
+// Load igenomes.config if required
+if (!params.igenomes_ignore) {
+    includeConfig 'conf/igenomes.config'
+} else {
+    params.genomes = [:]
+}
+
 // Export these variables to prevent local Python/R libraries from conflicting with those in the container
+// The JULIA depot path has been adjusted to a fixed path `/usr/local/share/julia` that needs to be used for packages in the container.
+// See https://apeltzer.github.io/post/03-julia-lang-nextflow/ for details on that. Once we have a common agreement on where to keep Julia packages, this is adjustable.
+
 env {
     PYTHONNOUSERSITE = 1
     R_PROFILE_USER   = "/.Rprofile"
     R_ENVIRON_USER   = "/.Renviron"
+    JULIA_DEPOT_PATH = "/usr/local/share/julia"
 }
 
 // Capture exit codes from upstream processes when piping
@@ -231,10 +242,13 @@ manifest {
     homePage        = 'https://github.com/nf-core/hic'
     description     = 'Analysis of Chromosome Conformation Capture data (Hi-C)'
     mainScript      = 'main.nf'
-    nextflowVersion = '!>=21.04.0'
-    version = '1.4.0dev'
+    nextflowVersion = '!>=21.10.3'
+    version         = '1.4.0dev'
 }
 
+// Load modules.config for DSL2 module specific options
+includeConfig 'conf/modules.config'
+
 // Function to ensure that resource requirements don't go beyond
 // a maximum limit
 def check_max(obj, type) {
diff --git a/nextflow_schema.json b/nextflow_schema.json
index ffd2c0bce59dd59382295e1e96799ad9570237e2..ad0cab5329262ca66476703c6f93ca0ca441a811 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -10,20 +10,22 @@
             "type": "object",
             "fa_icon": "fas fa-terminal",
             "description": "Define where the pipeline should find input data and save output data.",
-            "required": [
-                "input"
-            ],
+            "required": ["input", "outdir"],
             "properties": {
                 "input": {
                     "type": "string",
-                    "fa_icon": "fas fa-dna",
-                    "description": "Input FastQ files.",
-                    "help_text": "Use this to specify the location of your input FastQ files. For example:\n\n```bash\n--input 'path/to/data/sample_*_{1,2}.fastq'\n```\n\nPlease note the following requirements:\n\n1. The path must be enclosed in quotes\n2. The path must have at least one `*` wildcard character\n3. When using the pipeline with paired end data, the path must use `{1,2}` notation to specify read pairs.\n\nIf left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`"
+                    "format": "file-path",
+                    "mimetype": "text/csv",
+                    "pattern": "^\\S+\\.csv$",
+                    "schema": "assets/schema_input.json",
+                    "description": "Path to comma-separated file containing information about the samples in the experiment.",
+                    "help_text": "You will need to create a design file with information about the samples in your experiment before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row. See [usage docs](https://nf-co.re/hic/usage#samplesheet-input).",
+                    "fa_icon": "fas fa-file-csv"
                 },
                 "outdir": {
                     "type": "string",
-                    "description": "The output directory where the results will be saved.",
-                    "default": "./results",
+                    "format": "directory-path",
+                    "description": "The output directory where the results will be saved. You have to use absolute paths to storage on Cloud infrastructure.",
                     "fa_icon": "fas fa-folder-open"
                 },
                 "email": {
@@ -32,6 +34,11 @@
                     "fa_icon": "fas fa-envelope",
                     "help_text": "Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to specify this on the command line for every run.",
                     "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$"
+                },
+                "multiqc_title": {
+                    "type": "string",
+                    "description": "MultiQC report title. Printed as page header, used for filename if not otherwise specified.",
+                    "fa_icon": "fas fa-file-signature"
                 }
             }
         },
@@ -39,22 +46,26 @@
             "title": "Reference genome options",
             "type": "object",
             "fa_icon": "fas fa-dna",
-            "description": "Options for the reference genome indices used to align reads.",
+            "description": "Reference genome related files and options required for the workflow.",
             "properties": {
                 "genome": {
                     "type": "string",
                     "description": "Name of iGenomes reference.",
                     "fa_icon": "fas fa-book",
-                    "help_text": "If using a reference genome configured in the pipeline using iGenomes, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. `--genome GRCh38`.\n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details."
+                    "help_text": "If using a reference genome configured in the pipeline using iGenomes, use this parameter to give the ID for the reference. This is then used to build the full paths for all required reference genome files e.g. `--genome GRCh38`. \n\nSee the [nf-core website docs](https://nf-co.re/usage/reference_genomes) for more details."
                 },
                 "fasta": {
                     "type": "string",
-                    "fa_icon": "fas fa-font",
+                    "format": "file-path",
+                    "mimetype": "text/plain",
+                    "pattern": "^\\S+\\.fn?a(sta)?(\\.gz)?$",
                     "description": "Path to FASTA genome file.",
-                    "help_text": "If you have no genome reference available, the pipeline can build one using a FASTA file. This requires additional time and resources, so it's better to use a pre-build index if possible."
+                    "help_text": "This parameter is *mandatory* if `--genome` is not specified. If you don't have a BWA index available this will be generated for you automatically. Combine with `--save_reference` to save BWA index for future runs.",
+                    "fa_icon": "far fa-file-code"
                 },
                 "igenomes_base": {
                     "type": "string",
+                    "format": "directory-path",
                     "description": "Directory / URL base for iGenomes references.",
                     "default": "s3://ngi-igenomes/igenomes",
                     "fa_icon": "fas fa-cloud-download-alt",
@@ -317,91 +328,50 @@
                 }
             }
         },
-        "generic_options": {
-            "title": "Generic options",
+        "institutional_config_options": {
+            "title": "Institutional config options",
             "type": "object",
-            "fa_icon": "fas fa-file-import",
-            "description": "Less common options for the pipeline, typically set in a config file.",
-            "help_text": "These options are common to all nf-core pipelines and allow you to customise some of the core preferences for how the pipeline runs.\n\nTypically these options would be set in a Nextflow config file loaded for all pipeline runs, such as `~/.nextflow/config`.",
+            "fa_icon": "fas fa-university",
+            "description": "Parameters used to describe centralised config profiles. These should not be edited.",
+            "help_text": "The centralised nf-core configuration profiles use a handful of pipeline parameters to describe themselves. This information is then printed to the Nextflow log when you run a pipeline. You should not need to change these values when you run a pipeline.",
             "properties": {
-                "help": {
-                    "type": "boolean",
-                    "description": "Display help text.",
+                "custom_config_version": {
+                    "type": "string",
+                    "description": "Git commit id for Institutional configs.",
+                    "default": "master",
                     "hidden": true,
-                    "fa_icon": "fas fa-question-circle"
+                    "fa_icon": "fas fa-users-cog"
                 },
-                "publish_dir_mode": {
+                "custom_config_base": {
                     "type": "string",
-                    "default": "copy",
+                    "description": "Base directory for Institutional configs.",
+                    "default": "https://raw.githubusercontent.com/nf-core/configs/master",
                     "hidden": true,
-                    "description": "Method used to save pipeline results to output directory.",
-                    "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.",
-                    "fa_icon": "fas fa-copy",
-                    "enum": [
-                        "symlink",
-                        "rellink",
-                        "link",
-                        "copy",
-                        "copyNoFollow",
-                        "move"
-                    ]
+                    "help_text": "If you're running offline, Nextflow will not be able to fetch the institutional config files from the internet. If you don't need them, then this is not a problem. If you do need them, you should download the files from the repo and tell Nextflow where to find them with this parameter.",
+                    "fa_icon": "fas fa-users-cog"
                 },
-                "validate_params": {
-                    "type": "boolean",
-                    "description": "Boolean whether to validate parameters against the schema at runtime",
-                    "default": true,
-                    "fa_icon": "fas fa-check-square",
+                "config_profile_name": {
+                    "type": "string",
+                    "description": "Institutional config name",
                     "hidden": true
                 },
-                "email_on_fail": {
+                "config_profile_description": {
                     "type": "string",
-                    "description": "Email address for completion summary, only when pipeline fails.",
-                    "fa_icon": "fas fa-exclamation-triangle",
-                    "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$",
-                    "hidden": true,
-                    "help_text": "This works exactly as with `--email`, except emails are only sent if the workflow is not successful."
-                },
-                "plaintext_email": {
-                    "type": "boolean",
-                    "description": "Send plain-text email instead of HTML.",
-                    "fa_icon": "fas fa-remove-format",
+                    "description": "Institutional config description.",
                     "hidden": true,
-                    "help_text": "Set to receive plain-text e-mails instead of HTML formatted."
+                    "fa_icon": "fas fa-users-cog"
                 },
-                "max_multiqc_email_size": {
+                "config_profile_contact": {
                     "type": "string",
-                    "description": "File size limit when attaching MultiQC reports to summary emails.",
-                    "default": "25.MB",
-                    "fa_icon": "fas fa-file-upload",
-                    "hidden": true,
-                    "help_text": "If file generated by pipeline exceeds the threshold, it will not be attached."
-                },
-                "monochrome_logs": {
-                    "type": "boolean",
-                    "description": "Do not use coloured log outputs.",
-                    "fa_icon": "fas fa-palette",
+                    "description": "Institutional config contact information.",
                     "hidden": true,
-                    "help_text": "Set to disable colourful command line output and live life in monochrome."
-                },
-                "multiqc_config": {
-                    "type": "string",
-                    "description": "Custom config file to supply to MultiQC.",
-                    "fa_icon": "fas fa-cog",
-                    "hidden": true
+                    "fa_icon": "fas fa-users-cog"
                 },
-                "tracedir": {
+                "config_profile_url": {
                     "type": "string",
-                    "description": "Directory to keep pipeline Nextflow logs and reports.",
-                    "default": "${params.outdir}/pipeline_info",
-                    "fa_icon": "fas fa-cogs",
-                    "hidden": true
-                },
-                "show_hidden_params": {
-                    "type": "boolean",
-                    "fa_icon": "far fa-eye-slash",
-                    "description": "Show all params when using `--help`",
+                    "description": "Institutional config URL link.",
                     "hidden": true,
-                    "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
+                    "fa_icon": "fas fa-users-cog"
                 }
             }
         },
@@ -414,7 +384,7 @@
             "properties": {
                 "max_cpus": {
                     "type": "integer",
-                    "description": "Maximum number of CPUs that can be requested    for any single job.",
+                    "description": "Maximum number of CPUs that can be requested for any single job.",
                     "default": 16,
                     "fa_icon": "fas fa-microchip",
                     "hidden": true,
@@ -440,57 +410,88 @@
                 }
             }
         },
-        "institutional_config_options": {
-            "title": "Institutional config options",
+        "generic_options": {
+            "title": "Generic options",
             "type": "object",
-            "fa_icon": "fas fa-university",
-            "description": "Parameters used to describe centralised config profiles. These should not be edited.",
-            "help_text": "The centralised nf-core configuration profiles use a handful of pipeline parameters to describe themselves. This information is then printed to the Nextflow log when you run a pipeline. You should not need to change these values when you run a pipeline.",
+            "fa_icon": "fas fa-file-import",
+            "description": "Less common options for the pipeline, typically set in a config file.",
+            "help_text": "These options are common to all nf-core pipelines and allow you to customise some of the core preferences for how the pipeline runs.\n\nTypically these options would be set in a Nextflow config file loaded for all pipeline runs, such as `~/.nextflow/config`.",
             "properties": {
-                "custom_config_version": {
-                    "type": "string",
-                    "description": "Git commit id for Institutional configs.",
-                    "default": "master",
-                    "hidden": true,
-                    "fa_icon": "fas fa-users-cog",
-                    "help_text": "Provide git commit id for custom Institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default: `master`.\n\n```bash\n## Download and use config file with following git commit id\n--custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96\n```"
+                "help": {
+                    "type": "boolean",
+                    "description": "Display help text.",
+                    "fa_icon": "fas fa-question-circle",
+                    "hidden": true
                 },
-                "custom_config_base": {
+                "publish_dir_mode": {
                     "type": "string",
-                    "description": "Base directory for Institutional configs.",
-                    "default": "https://raw.githubusercontent.com/nf-core/configs/master",
-                    "hidden": true,
-                    "help_text": "If you're running offline, nextflow will not be able to fetch the institutional config files from the internet. If you don't need them, then this is not a problem. If you do need them, you should download the files from the repo and tell nextflow where to find them with the `custom_config_base` option. For example:\n\n```bash\n## Download and unzip the config files\ncd /path/to/my/configs\nwget https://github.com/nf-core/configs/archive/master.zip\nunzip master.zip\n\n## Run the pipeline\ncd /path/to/my/data\nnextflow run /path/to/pipeline/ --custom_config_base /path/to/my/configs/configs-master/\n```\n\n> Note that the nf-core/tools helper package has a `download` command to download all required pipeline files + singularity containers + institutional configs in one go for you, to make this process easier.",
-                    "fa_icon": "fas fa-users-cog"
+                    "default": "copy",
+                    "description": "Method used to save pipeline results to output directory.",
+                    "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.",
+                    "fa_icon": "fas fa-copy",
+                    "enum": ["symlink", "rellink", "link", "copy", "copyNoFollow", "move"],
+                    "hidden": true
                 },
-                "hostnames": {
+                "email_on_fail": {
                     "type": "string",
-                    "description": "Institutional configs hostname.",
-                    "hidden": true,
-                    "fa_icon": "fas fa-users-cog"
+                    "description": "Email address for completion summary, only when pipeline fails.",
+                    "fa_icon": "fas fa-exclamation-triangle",
+                    "pattern": "^([a-zA-Z0-9_\\-\\.]+)@([a-zA-Z0-9_\\-\\.]+)\\.([a-zA-Z]{2,5})$",
+                    "help_text": "An email address to send a summary email to when the pipeline is completed - ONLY sent if the pipeline does not exit successfully.",
+                    "hidden": true
                 },
-                "config_profile_name": {
+                "plaintext_email": {
+                    "type": "boolean",
+                    "description": "Send plain-text email instead of HTML.",
+                    "fa_icon": "fas fa-remove-format",
+                    "hidden": true
+                },
+                "max_multiqc_email_size": {
                     "type": "string",
-                    "description": "Institutional config name",
+                    "description": "File size limit when attaching MultiQC reports to summary emails.",
+                    "pattern": "^\\d+(\\.\\d+)?\\.?\\s*(K|M|G|T)?B$",
+                    "default": "25.MB",
+                    "fa_icon": "fas fa-file-upload",
                     "hidden": true
                 },
-                "config_profile_description": {
+                "monochrome_logs": {
+                    "type": "boolean",
+                    "description": "Do not use coloured log outputs.",
+                    "fa_icon": "fas fa-palette",
+                    "hidden": true
+                },
+                "multiqc_config": {
                     "type": "string",
-                    "description": "Institutional config description.",
-                    "hidden": true,
-                    "fa_icon": "fas fa-users-cog"
+                    "description": "Custom config file to supply to MultiQC.",
+                    "fa_icon": "fas fa-cog",
+                    "hidden": true
                 },
-                "config_profile_contact": {
+                "tracedir": {
                     "type": "string",
-                    "description": "Institutional config contact information.",
+                    "description": "Directory to keep pipeline Nextflow logs and reports.",
+                    "default": "${params.outdir}/pipeline_info",
+                    "fa_icon": "fas fa-cogs",
+                    "hidden": true
+                },
+                "validate_params": {
+                    "type": "boolean",
+                    "description": "Boolean whether to validate parameters against the schema at runtime",
+                    "default": true,
+                    "fa_icon": "fas fa-check-square",
+                    "hidden": true
+                },
+                "show_hidden_params": {
+                    "type": "boolean",
+                    "fa_icon": "far fa-eye-slash",
+                    "description": "Show all params when using `--help`",
                     "hidden": true,
-                    "fa_icon": "fas fa-users-cog"
+                    "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
                 },
-                "config_profile_url": {
-                    "type": "string",
-                    "description": "Institutional config URL link.",
+                "enable_conda": {
+                    "type": "boolean",
+                    "description": "Run this workflow with Conda. You can also use '-profile conda' instead of providing this parameter.",
                     "hidden": true,
-                    "fa_icon": "fas fa-users-cog"
+                    "fa_icon": "fas fa-bacon"
                 }
             }
         }
@@ -525,12 +526,12 @@
         },
         {
             "$ref": "#/definitions/generic_options"
+	},
+            "$ref": "#/definitions/institutional_config_options"
         },
         {
             "$ref": "#/definitions/max_job_request_options"
         },
         {
-            "$ref": "#/definitions/institutional_config_options"
+            "$ref": "#/definitions/generic_options"
         }
     ]
 }
diff --git a/workflows/hic.nf b/workflows/hic.nf
index 0940deaa91a5627ac25d07101dd9287bd23ebd4a..4592e83b4da49ee8c7ababb8cae9b2cce7f93b39 100644
--- a/workflows/hic.nf
+++ b/workflows/hic.nf
@@ -1,12 +1,11 @@
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     VALIDATE INPUTS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 def summary_params = NfcoreSchema.paramsSummaryMap(workflow, params)
 
-//*****************************************
 // Validate input parameters
 WorkflowHic.initialise(params, log)
 
@@ -16,6 +15,7 @@ checkPathParamList = [
     params.input, params.multiqc_config,
     params.fasta, params.bwt2_index
 ]
+
 for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true) } }
 
 // Check mandatory parameters
@@ -80,18 +80,18 @@ if (params.res_compartments && !params.skip_compartments){
 
 ch_map_res = ch_map_res.unique()
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     CONFIG FILES
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 ch_multiqc_config        = file("$projectDir/assets/multiqc_config.yml", checkIfExists: true)
 ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config) : Channel.empty()
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     IMPORT LOCAL MODULES/SUBWORKFLOWS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 //
@@ -111,22 +111,23 @@ include { COMPARTMENTS } from '../subworkflows/local/compartments'
 include { TADS } from '../subworkflows/local/tads'
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     IMPORT NF-CORE MODULES/SUBWORKFLOWS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 //
 // MODULE: Installed directly from nf-core/modules
 //
+
 include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/modules/custom/dumpsoftwareversions/main'
 include { FASTQC  } from '../modules/nf-core/modules/fastqc/main'
 //include { MULTIQC } from '../modules/nf-core/modules/multiqc/main'
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   CHANNELS
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 Channel.fromPath( params.fasta )
@@ -134,9 +135,9 @@ Channel.fromPath( params.fasta )
        .set { ch_fasta }
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     RUN MAIN WORKFLOW
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 // Info required for completion email and summary
@@ -277,9 +278,9 @@ workflow HIC {
 }
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     COMPLETION EMAIL AND SUMMARY
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
 workflow.onComplete {
@@ -290,7 +291,7 @@ workflow.onComplete {
 }
 
 /*
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     THE END
-========================================================================================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */