diff --git a/.github/.dockstore.yml b/.github/.dockstore.yml
index 030138a0ca97a91f378c4cd4d55e79ac4de1dc55..191fabd22a5c81f1db00e56be6e712ef01451512 100644
--- a/.github/.dockstore.yml
+++ b/.github/.dockstore.yml
@@ -3,3 +3,4 @@ version: 1.2
 workflows:
   - subclass: nfl
     primaryDescriptorPath: /nextflow.config
+    publish: True
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 25ef2ed3c3c87f3ab115e92de6d830fdb6718b4d..284970f00a1e6d4fa739c1faa0805642fdc0668f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -69,7 +69,7 @@ If you wish to contribute a new step, please use the following coding standards:
 2. Write the process block (see below).
 3. Define the output channel if needed (see below).
 4. Add any new flags/options to `nextflow.config` with a default (see below).
-5. Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build .`)
+5. Add any new flags/options to `nextflow_schema.json` with help text (with `nf-core schema build .`).
 6. Add any new flags/options to the help message (for integer/text parameters, print to help the corresponding `nextflow.config` parameter).
 7. Add sanity checks for all relevant parameters.
 8. Add any new software to the `scrape_software_versions.py` script in `bin/` and the version command to the `scrape_software_versions` process in `main.nf`.
@@ -87,7 +87,7 @@ Once there, use `nf-core schema build .` to add to `nextflow_schema.json`.
 
 ### Default processes resource requirements
 
-Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. These should generally be specified generic with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/%7B%7Bcookiecutter.name_noslash%7D%7D/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels.
+Sensible defaults for process resource requirements (CPUs / memory / time) for a process should be defined in `conf/base.config`. These should generally be specified generic with `withLabel:` selectors so they can be shared across multiple processes/steps of the pipeline. A nf-core standard set of labels that should be followed where possible can be seen in the [nf-core pipeline template](https://github.com/nf-core/tools/blob/master/nf_core/pipeline-template/conf/base.config), which has the default process as a single core-process, and then different levels of multi-core configurations for increasingly large memory requirements defined with standardised labels.
 
 The process resources can be passed on to the tool dynamically within the process with the `${task.cpu}` and `${task.memory}` variables in the `script:` block.
 
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 5ac2f7f52734df2cde27f756dcf7de5e4525463c..eeaa8479de91fb0b11752b812570bdb062fd1b40 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -55,7 +55,7 @@ Have you provided the following extra information/files:
 
 ## Container engine
 
-- Engine: <!-- [e.g. Conda, Docker, Singularity or Podman] -->
+- Engine: <!-- [e.g. Conda, Docker, Singularity, Podman, Shifter or Charliecloud] -->
 - version: <!-- [e.g. 1.0.0] -->
 - Image tag: <!-- [e.g. nfcore/hic:1.0.0] -->
 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 2e01a5fe11f6ed4f3e5bfb4bcaff8c8b7bdc56d5..2cec9b3b778f87d420a0d124094557fe5b8efadf 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -1,6 +1,6 @@
 ---
 name: Feature request
-about: Suggest an idea for the nf-core website
+about: Suggest an idea for the nf-core/hic pipeline
 labels: enhancement
 ---
 
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index fe95321696a9a5b3a88b419e21e2e593e375d493..f94b9a453d3e42987ffbdd9a2d2be99e9e2b70ac 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -10,18 +10,18 @@ Remember that PRs should be made against the dev branch, unless you're preparing
 
 Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
 -->
+<!-- markdownlint-disable ul-indent -->
 
 ## PR checklist
 
 - [ ] This comment contains a description of changes (with reason).
 - [ ] If you've fixed a bug or added code that should be tested, add tests!
- - [ ] If you've added a new tool - add to the software_versions process and a regex to `scrape_software_versions.py`
- - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
- - [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
+    - [ ] If you've added a new tool - add to the software_versions process and a regex to `scrape_software_versions.py`
+    - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
+    - [ ] If necessary, also make a PR on the nf-core/hic _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository.
 - [ ] Make sure your code lints (`nf-core lint .`).
 - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`).
 - [ ] Usage Documentation in `docs/usage.md` is updated.
 - [ ] Output Documentation in `docs/output.md` is updated.
 - [ ] `CHANGELOG.md` is updated.
 - [ ] `README.md` is updated (including new tool citations and authors/contributors).
-
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index 879955ba790402ab7ada28cc54320644e27ec0cb..cefb14a0ab730b7b0247a8efeb72a84da964bd92 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -9,6 +9,16 @@ on:
     types: [completed]
   workflow_dispatch:
 
+
+env:
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
+  AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
+  AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
+  AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
+
+
 jobs:
   run-awstest:
     name: Run AWS full tests
@@ -26,13 +36,6 @@ jobs:
         # Add full size test data (but still relatively small datasets for few samples)
         # on the `test_full.config` test runs with only one set of parameters
         # Then specify `-profile test_full` instead of `-profile test` on the AWS batch command
-        env:
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
-          AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
-          AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
-          AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
         run: |
           aws batch submit-job \
             --region eu-west-1 \
diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml
index ee179a28749abcadb808634e8f682575513f8a99..c9eafe60d1f9e232718fe7cd392ba09b984a06ce 100644
--- a/.github/workflows/awstest.yml
+++ b/.github/workflows/awstest.yml
@@ -6,6 +6,16 @@ name: nf-core AWS test
 on:
   workflow_dispatch:
 
+
+env:
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
+  AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
+  AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
+  AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
+
+
 jobs:
   run-awstest:
     name: Run AWS tests
@@ -22,13 +32,6 @@ jobs:
       - name: Start AWS batch job
         # For example: adding multiple test runs with different parameters
         # Remember that you can parallelise this by using strategy.matrix
-        env:
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          TOWER_ACCESS_TOKEN: ${{ secrets.AWS_TOWER_TOKEN }}
-          AWS_JOB_DEFINITION: ${{ secrets.AWS_JOB_DEFINITION }}
-          AWS_JOB_QUEUE: ${{ secrets.AWS_JOB_QUEUE }}
-          AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
         run: |
           aws batch submit-job \
           --region eu-west-1 \
diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml
index 92136a144c3e48bc1a11a7c893f2d69f958dd65d..3521022c4d8fd8f3ea7171bef94d9c3c96e2514d 100644
--- a/.github/workflows/branch.yml
+++ b/.github/workflows/branch.yml
@@ -13,7 +13,7 @@ jobs:
       - name: Check PRs
         if: github.repository == 'nf-core/hic'
         run: |
-          { [[ ${{github.event.pull_request.head.repo.full_name}} == nf-core/hic ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]]
+          { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/hic ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]]
 
 
       # If the above check failed, post a comment on the PR explaining the failure
@@ -23,13 +23,22 @@ jobs:
         uses: mshick/add-pr-comment@v1
         with:
           message: |
+            ## This PR is against the `master` branch :x:
+
+            * Do not close this PR
+            * Click _Edit_ and change the `base` to `dev`
+            * This CI test will remain failed until you push a new commit
+
+            ---
+
             Hi @${{ github.event.pull_request.user.login }},
 
-            It looks like this pull-request is has been made against the ${{github.event.pull_request.head.repo.full_name}} `master` branch.
+            It looks like this pull-request has been made against the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `master` branch.
             The `master` branch on nf-core repositories should always contain code from the latest release.
-            Because of this, PRs to `master` are only allowed if they come from the ${{github.event.pull_request.head.repo.full_name}} `dev` branch.
+            Because of this, PRs to `master` are only allowed if they come from the [${{github.event.pull_request.head.repo.full_name }}](https://github.com/${{github.event.pull_request.head.repo.full_name }}) `dev` branch.
 
             You do not need to close this PR, you can change the target branch to `dev` by clicking the _"Edit"_ button at the top of this page.
+            Note that even after this, the test will continue to show as failing until you push a new commit.
 
             Thanks again for your contribution!
           repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f7e6e00ae0bbef6fc886b20d0e30c766931cfe88..734d985bda7b57a590be1f60487f2220a45f8c71 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,9 @@ on:
   release:
     types: [published]
 
+# Uncomment if we need an edge release of Nextflow again
+# env: NXF_EDGE: 1
+
 jobs:
   test:
     name: Run workflow tests
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 6f2be6b08786ec7559b42df36d16c10790e60172..fcde400cedbc1566f84e8a811e0b45a1c113df60 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -19,6 +19,34 @@ jobs:
         run: npm install -g markdownlint-cli
       - name: Run Markdownlint
         run: markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml
+
+      # If the above check failed, post a comment on the PR explaining the failure
+      - name: Post PR comment
+        if: failure()
+        uses: mshick/add-pr-comment@v1
+        with:
+          message: |
+            ## Markdown linting is failing
+
+            To keep the code consistent with lots of contributors, we run automated code consistency checks.
+            To fix this CI test, please run:
+
+            * Install `markdownlint-cli`
+                * On Mac: `brew install markdownlint-cli`
+                * Everything else: [Install `npm`](https://www.npmjs.com/get-npm) then [install `markdownlint-cli`](https://www.npmjs.com/package/markdownlint-cli) (`npm install -g markdownlint-cli`)
+            * Fix the markdown errors
+                * Automatically: `markdownlint . --config .github/markdownlint.yml --fix`
+                * Manually resolve anything left from `markdownlint . --config .github/markdownlint.yml`
+
+            Once you push these changes the test should pass, and you can hide this comment :+1:
+
+            We highly recommend setting up markdownlint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+
+            Thanks again for your contribution!
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          allow-repeats: false
+
+
   YAML:
     runs-on: ubuntu-latest
     steps:
@@ -29,7 +57,34 @@ jobs:
       - name: Install yaml-lint
         run: npm install -g yaml-lint
       - name: Run yaml-lint
-        run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml")
+        run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml" -o -name "*.yaml")
+
+      # If the above check failed, post a comment on the PR explaining the failure
+      - name: Post PR comment
+        if: failure()
+        uses: mshick/add-pr-comment@v1
+        with:
+          message: |
+            ## YAML linting is failing
+
+            To keep the code consistent with lots of contributors, we run automated code consistency checks.
+            To fix this CI test, please run:
+
+            * Install `yaml-lint`
+                * [Install `npm`](https://www.npmjs.com/get-npm) then [install `yaml-lint`](https://www.npmjs.com/package/yaml-lint) (`npm install -g yaml-lint`)
+            * Fix the YAML errors
+                * Run the test locally: `yamllint $(find . -type f -name "*.yml" -o -name "*.yaml")`
+                * Fix any reported errors in your YAML files
+
+            Once you push these changes the test should pass, and you can hide this comment :+1:
+
+            We highly recommend setting up yaml-lint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help!
+
+            Thanks again for your contribution!
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          allow-repeats: false
+
+
   nf-core:
     runs-on: ubuntu-latest
     steps:
@@ -69,8 +124,9 @@ jobs:
         if: ${{ always() }}
         uses: actions/upload-artifact@v2
         with:
-          name: linting-log-file
+          name: linting-logs
           path: |
             lint_log.txt
             lint_results.md
             PR_number.txt
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fd4557dde46498c38991bb1c9bc8a174e831e772..cbc5699280ee52cb6fdaba41e6587b219f840a62 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,14 +5,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## v1.3.0dev
 
-* Add HiCExplorer distance decay quality control
-* Add HiCExplorer TADs calling
-* Add insulation score TADs calling
-* Generate cooler/h5/txt contact maps
+* Fix bug in stats export
+* Replace the /tmp/ folder with the ./tmp/ folder so that all tmp files are now in the work directory (#24)
+* Add `--hicpro_maps` options to generate the raw and normalized HiC-Pro maps. The default is now to use cooler
+* Add HiCExplorer distance decay quality control (#54)
+* Add HiCExplorer TADs calling (#55)
+* Add insulation score TADs calling (#55)
+* Generate cooler/txt contact maps
 * Normalize Hi-C data with cooler instead of iced
 * New `--digestion` parameter to automatically set the restriction_site and ligation_site motifs
+* New `--keep_multi` and `--keep_dup` options. Default: false
-* Template update for nf-core/tools v1.11
+* Template update for nf-core/tools
 * Minor fix to summary log messages in pipeline header
 
 ### `Fixed`
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index daea9ea82d791ff54b2b19755a09371e0ae330cc..f4fd052f1f1708b3c07343d19575b1e001e2928a 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,68 +1,111 @@
-# Contributor Covenant Code of Conduct
+# Code of Conduct at nf-core (v1.0)
 
 ## Our Pledge
 
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project
-and our community a harassment-free experience for everyone, regardless of
-age, body size, disability, ethnicity, gender identity and expression, level
-of experience, nationality, personal appearance, race, religion, or sexual
-identity and orientation.
+In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core, pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of:
 
-## Our Standards
+- Age
+- Body size
+- Familial status
+- Gender identity and expression
+- Geographical location
+- Level of experience
+- Nationality and national origins
+- Native language
+- Physical and neurological ability
+- Race or ethnicity
+- Religion
+- Sexual identity and orientation
+- Socioeconomic status
 
-Examples of behavior that contributes to creating a positive environment
-include:
+Please note that the list above is alphabetised and is therefore not ranked in any order of preference or importance.
 
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
+## Preamble
 
-Examples of unacceptable behavior by participants include:
+> Note: This Code of Conduct (CoC) has been drafted by the nf-core Safety Officer and been edited after input from members of the nf-core team and others. "We", in this document, refers to the Safety Officer and members of the nf-core core team, both of whom are deemed to be members of the nf-core community and are therefore required to abide by this Code of Conduct. This document will be amended periodically to keep it up-to-date, and in case of any dispute, the most current version will apply.
 
-* The use of sexualized language or imagery and unwelcome sexual attention
-or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
-address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
-professional setting
+An up-to-date list of members of the nf-core core team can be found [here](https://nf-co.re/about). Our current safety officer is Renuka Kudva.
+
+nf-core is a young and growing community that welcomes contributions from anyone with a shared vision for [Open Science Policies](https://www.fosteropenscience.eu/taxonomy/term/8). Open science policies encompass inclusive behaviours and we strive to build and maintain a safe and inclusive environment for all individuals.
+
+We have therefore adopted this code of conduct (CoC), which we require all members of our community and attendees in nf-core events to adhere to in all our workspaces at all times. Workspaces include but are not limited to Slack, meetings on Zoom, Jitsi, YouTube live etc.
+
+Our CoC will be strictly enforced and the nf-core team reserve the right to exclude participants who do not comply with our guidelines from our workspaces and future nf-core activities.
+
+We ask all members of our community to help maintain a supportive and productive workspace and to avoid behaviours that can make individuals feel unsafe or unwelcome. Please help us maintain and uphold this CoC.
+
+Questions, concerns or ideas on what we can include? Contact safety [at] nf-co [dot] re
 
 ## Our Responsibilities
 
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
+The safety officer is responsible for clarifying the standards of acceptable behavior and is expected to take appropriate and fair corrective action in response to any instances of unacceptable behaviour.
+
+The safety officer in consultation with the nf-core core team have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+Members of the core team or the safety officer who violate the CoC will be required to recuse themselves pending investigation. They will not have access to any reports of the violations and be subject to the same actions as others in violation of the CoC.
+
+## When and where does this Code of Conduct apply?
+
+Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events. This includes but is not limited to the following listed alphabetically and therefore in no order of preference:
+
+- Communicating with an official project email address.
+- Communicating with community members within the nf-core Slack channel.
+- Participating in hackathons organised by nf-core (both online and in-person events).
+- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence.
+- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc.
+- Representing nf-core on social media. This includes both official and personal accounts.
+
+## nf-core cares 😊
+
+nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include but are not limited to the following (listed in alphabetical order):
+
+- Ask for consent before sharing another community member’s personal information (including photographs) on social media.
+- Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity.
+- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !)
+- Demonstrate empathy towards other community members. (We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.)
+- Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can)
+- Focus on what is best for the team and the community. (When in doubt, ask)
+- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn.
+- Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!)
+- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**)
+- Take breaks when you feel like you need them.
+- Using welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.)
+
+## nf-core frowns on 😕
+
+The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this code of conduct. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces.
+
+- Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom.
+- “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online.
+- Spamming or trolling of individuals on social media.
+- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention.
+- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience.
+
+### Online Trolling
+
+The majority of nf-core interactions and events are held online. Unfortunately, holding events online comes with the added issue of online trolling. This is unacceptable, reports of such behaviour will be taken very seriously, and perpetrators will be excluded from activities immediately.
+
+All community members are required to ask members of the group they are working within for explicit consent prior to taking screenshots of individuals during video calls.
+
+## Procedures for Reporting CoC violations
 
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
+If someone makes you feel uncomfortable through their behaviours or actions, report it as soon as possible.
 
-## Scope
+You can reach out to members of the [nf-core core team](https://nf-co.re/about) and they will forward your concerns to the safety officer(s).
 
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an
-appointed representative at an online or offline event. Representation of a
-project may be further defined and clarified by project maintainers.
+Issues directly concerning members of the core team will be dealt with by other members of the core team and the safety manager, and possible conflicts of interest will be taken into account. nf-core is also in discussions about having an ombudsperson, and details will be shared in due course.
 
-## Enforcement
+All reports will be handled with utmost discretion and confidentiality.
 
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-co.re/join/slack). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+## Attribution and Acknowledgements
 
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
+- The [Contributor Covenant, version 1.4](http://contributor-covenant.org/version/1/4)
+- The [OpenCon 2017 Code of Conduct](http://www.opencon2017.org/code_of_conduct) (CC BY 4.0 OpenCon organisers, SPARC and Right to Research Coalition)
+- The [eLife innovation sprint 2020 Code of Conduct](https://sprint.elifesciences.org/code-of-conduct/)
+- The [Mozilla Community Participation Guidelines v3.1](https://www.mozilla.org/en-US/about/governance/policies/participation/) (version 3.1, CC BY-SA 3.0 Mozilla)
 
-## Attribution
+## Changelog
 
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct/][version]
+### v1.0 - March 12th, 2021
 
-[homepage]: https://contributor-covenant.org
-[version]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/
+- Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC.
diff --git a/Dockerfile b/Dockerfile
index 35ffbe997bc475e7d4ed69da9508c1c5a0a6e426..18af828827fec11b7c15b7278c9b4591b8bc00dc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM nfcore/base:1.12.1
+FROM nfcore/base:1.14
 LABEL authors="Nicolas Servant" \
       description="Docker image containing all software requirements for the nf-core/hic pipeline"
 
diff --git a/README.md b/README.md
index 8fd90f44a6ff1119c92085f8d464a8ac3d5fe72c..cb88454ceec117d40a3f43251d85a67078085b99 100644
--- a/README.md
+++ b/README.md
@@ -24,9 +24,9 @@ In practice, this workflow was successfully applied to many data-sets including
 dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or
 HiChip data.
 
-Contact maps are generated in standard formats including HiC-Pro, cooler, and h5 format for
+Contact maps are generated in standard formats including HiC-Pro and cooler for
 downstream analysis and visualization.
-Addition analysis steps such as TADs calling are also available.
+Additional analysis steps such as compartments and TADs calling are also available.
 
 The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
 to run tasks across multiple compute infrastructures in a very portable manner.
@@ -35,27 +35,30 @@ results highly reproducible.
 
 ## Pipeline summary
 
-1. Mapping using a two steps strategy to rescue reads spanning the ligation
-sites ([`bowtie2`](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml))
-2. Detection of valid interaction products([`HiC-Pro`](https://github.com/nservant/HiC-Pro))
-3. Duplicates removal
-4. Create genome-wide contact maps at various resolution ([`cooler`](https://github.com/open2c/cooler))
-5. Contact maps normalization using the ICE algorithm ([`cooler`](https://github.com/open2c/cooler))
-6. Export to various contact maps formats ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`cooler`](https://github.com/open2c/cooler), [`HiCExplorer`](https://github.com/deeptools/HiCExplorer))
-7. Quality controls ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`HiCExplorer`](https://github.com/deeptools/HiCExplorer))
-8. TADs calling ([`HiCExplorer`](https://github.com/deeptools/HiCExplorer), [`cooler`](https://github.com/open2c/cooler))
-9. Quality control report ([`MultiQC`](https://multiqc.info/))
+1. HiC-Pro data processing ([`HiC-Pro`](https://github.com/nservant/HiC-Pro))
+   1. Mapping using a two steps strategy to rescue reads spanning the ligation
+   sites ([`bowtie2`](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml))
+   2. Detection of valid interaction products
+   3. Duplicates removal
+   4. Generate raw and normalized contact maps ([`iced`](https://github.com/hiclib/iced))
+2. Create genome-wide contact maps at various resolutions ([`cooler`](https://github.com/open2c/cooler))
+3. Contact maps normalization using balancing algorithm ([`cooler`](https://github.com/open2c/cooler))
+4. Export to various contact maps formats ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`cooler`](https://github.com/open2c/cooler))
+5. Quality controls ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`HiCExplorer`](https://github.com/deeptools/HiCExplorer))
+6. Compartments calling ([`cooltools`](https://cooltools.readthedocs.io/en/latest/))
+7. TADs calling ([`HiCExplorer`](https://github.com/deeptools/HiCExplorer), [`cooltools`](https://cooltools.readthedocs.io/en/latest/))
+8. Quality control report ([`MultiQC`](https://multiqc.info/))
 
 ## Quick Start
 
-1. Install [`nextflow`](https://nf-co.re/usage/installation)
+1. Install [`nextflow`](https://nf-co.re/usage/installation) (`>=20.04.0`)
 
-2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_
+2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_
 
 3. Download the pipeline and test it on a minimal dataset with a single command
 
     ```bash
-    nextflow run nf-core/hic -profile test,<docker/singularity/podman/conda/institute>
+    nextflow run nf-core/hic -profile test,<docker/singularity/podman/shifter/charliecloud/conda/institute>
     ```
 
     > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation)
@@ -67,7 +70,7 @@ sites ([`bowtie2`](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml))
 4. Start running your own analysis!
 
     ```bash
-    nextflow run nf-core/hic -profile <docker/singularity/podman/conda/institute> --input '*_R{1,2}.fastq.gz' --genome GRCh37
+    nextflow run nf-core/hic -profile <docker/singularity/podman/shifter/charliecloud/conda/institute> --input '*_R{1,2}.fastq.gz' --genome GRCh37
     ```
 
 ## Documentation
@@ -99,7 +102,6 @@ You can cite the `nf-core` publication as follows:
 > Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.
 >
 > _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).
-> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ)
 
 In addition, references of tools and data used in this pipeline are as follows:
 
diff --git a/assets/email_template.html b/assets/email_template.html
index 177bccd2d802d3a11e2e6ca606e72ac19968126b..d207f011f3ab74ac04ace7a4e277c59900cf058a 100644
--- a/assets/email_template.html
+++ b/assets/email_template.html
@@ -1,6 +1,5 @@
 <html>
 <head>
-  <head>
   <meta charset="utf-8">
   <meta http-equiv="X-UA-Compatible" content="IE=edge">
   <meta name="viewport" content="width=device-width, initial-scale=1">
diff --git a/assets/nf-core-hic_logo.png b/assets/nf-core-hic_logo.png
index 6b364161664e70224fac3a83fb9f02ed0acbd9f8..37461d9a32ae1f73d9090a3a2387cf8997c9a0ed 100644
Binary files a/assets/nf-core-hic_logo.png and b/assets/nf-core-hic_logo.png differ
diff --git a/conf/base.config b/conf/base.config
index c301031e67fecd8f4899b5bbc53f2c3adae9dcd3..ddec1a8507ded18a2d81923bc87daea40963c346 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -42,4 +42,5 @@ process {
   withName:get_software_versions {
     cache = false
   }
+
 }
diff --git a/conf/test.config b/conf/test.config
index d06ad508f9c3cd0db4b2173b5fe7a890216369c5..5c5fc84c35989f039418aeba4bc5b5b1c10da1a6 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -35,4 +35,7 @@ params {
   res_tads = '1000'
   tads_caller = 'insulation,hicexplorer'
   res_compartments = '1000'
+  
+  // Ignore `--input` as otherwise the parameter validation will throw an error
+  schema_ignore_params = 'genomes,digest,input_paths,input'
 }
diff --git a/conf/test_full.config b/conf/test_full.config
index 65dcbf8f5ddbce6c5e46c3160461e87a3ee56e98..1e793cc57628bdbed6bbe322e558bffc0e15a3d1 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -11,8 +11,6 @@ params {
   config_profile_name = 'Full test profile'
   config_profile_description = 'Full test dataset to check pipeline function'
 
-  // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA) 
-  // TODO nf-core: Give any required params for the test so that command line flags are not needed 
   // Input data for full size test
   input_paths = [
     ['SRR4292758_00', ['https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R2.fastq.gz']]
@@ -20,19 +18,19 @@ params {
 
   // Annotations
   fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
-  restriction_site = 'A^AGCTT'
-  ligation_site = 'AAGCTAGCTT'
-  
-  min_mapq = 2
-  rm_dup = true
-  rm_singleton = true
-  rm_multi = true
-
+  digestion = 'hindiii'
+  min_mapq = 10
   min_restriction_fragment_size = 100
   max_restriction_fragment_size = 100000
   min_insert_size = 100
   max_insert_size = 600
+
+  bin_size = '1000'
+  res_dist_decay = '1000'
+  res_tads = '1000'
+  tads_caller = 'insulation,hicexplorer'
+  res_compartments = '1000'
   
-  // Options
-  skip_cool = true
+  // Ignore `--input` as otherwise the parameter validation will throw an error
+  schema_ignore_params = 'genomes,digest,input_paths,input'
 }
diff --git a/docs/images/nf-core-hic_logo.png b/docs/images/nf-core-hic_logo.png
index e5fead372861ff430d7f1428e15dad9b045523e8..274eb3dc3f3db879c7f3cbc3fd8f49a705a9a3fb 100644
Binary files a/docs/images/nf-core-hic_logo.png and b/docs/images/nf-core-hic_logo.png differ
diff --git a/docs/output.md b/docs/output.md
index d4092a050ff5954e5782bd0a8b12ffa0bdfe37a4..d73bce332fae54e6248816828b963d38b24f5eac 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -1,9 +1,5 @@
 # nf-core/hic: Output
 
-## :warning: Please read this documentation on the nf-core website: [https://nf-co.re/hic/output](https://nf-co.re/hic/output)
-
-> _Documentation of pipeline parameters is generated automatically from the pipeline schema and can no longer be found in markdown files._
-
 ## Introduction
 
 This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline.
@@ -14,21 +10,29 @@ The directories listed below will be created in the results directory after the
 The pipeline is built using [Nextflow](https://www.nextflow.io/)
 and processes data using the following steps:
 
-* [Reads alignment](#reads-alignment)
-* [Valid pairs detection](#valid-pairs-detection)
-* [Duplicates removal](#duplicates-removal)
-* [Contact maps](#contact-maps)
+* [HiC-Pro](#hicpro)
+  * [Reads alignment](#reads-alignment)
+  * [Valid pairs detection](#valid-pairs-detection)
+  * [Duplicates removal](#duplicates-removal)
+  * [Contact maps](#hicpro-contact-maps)
+* [Hi-C contact maps](#hic-contact-maps)
+* [Downstream analysis](#downstream-analysis)
+  * [Distance decay](#distance-decay)
+  * [Compartments calling](#compartments-calling)
+  * [TADs calling](#tads-calling)
 * [MultiQC](#multiqc) - aggregate report and quality controls, describing
 results of the whole pipeline
 * [Export](#exprot) - additionnal export for compatibility with downstream
 analysis tool and visualization
 
+## HiC-Pro
+
 The current version is mainly based on the
 [HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
 For details about the workflow, see
 [Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
 
-## Reads alignment
+### Reads alignment
 
 Using Hi-C data, each reads mate has to be independantly aligned on the
 reference genome.
@@ -43,7 +47,7 @@ configuration parameters (`--rm-multi`).
 Note that if the `--dnase` mode is activated, HiC-Pro will skip the second
 mapping step.
 
-**Output directory: `results/mapping`**
+**Output directory: `results/hicpro/mapping`**
 
 * `*bwt2pairs.bam` - final BAM file with aligned paired data
 * `*.pairstat` - mapping statistics
@@ -68,7 +72,7 @@ the fraction of unmapped reads. The fraction of singleton is usually close to
 the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the
 same pair were unmapped.
 
-## Valid pairs detection
+### Valid pairs detection with HiC-Pro
 
 Each aligned reads can be assigned to one restriction fragment according to the
 reference genome and the digestion protocol.
@@ -96,6 +100,8 @@ DNase Hi-C or micro Hi-C, the assignment to a restriction is not possible
 Short range interactions that are likely to be spurious ligation products
 can thus be discarded using the `--min_cis_dist` parameter.
 
+**Output directory: `results/hicpro/valid_pairs`**
+
 * `*.validPairs` - List of valid ligation products
 * `*.DEpairs` - List of dangling-end products
 * `*.SCPairs` - List of self-circle products
@@ -122,12 +128,14 @@ is skipped. The aligned pairs are therefore directly used to generate the
 contact maps. A filter of the short range contact (typically <1kb) is
 recommanded as this pairs are likely to be self ligation products.
 
-## Duplicates removal
+### Duplicates removal
 
 Note that validPairs file are generated per reads chunck.
 These files are then merged in the allValidPairs file, and duplicates are
 removed if the `--rm_dup` parameter is used.
 
+**Output directory: `results/hicpro/valid_pairs`**
+
 * `*allValidPairs` - combined valid pairs from all read chunks
 * `*mergestat` - statistics about duplicates removal and valid pairs information
 
@@ -141,24 +149,29 @@ Finaly, an important metric is to look at the fraction of intra and
 inter-chromosomal interactions, as well as long range (>20kb) versus short
 range (<20kb) intra-chromosomal interactions.
 
-## Contact maps
+### Contact maps
 
 Intra et inter-chromosomal contact maps are build for all specified resolutions.
 The genome is splitted into bins of equal size. Each valid interaction is
 associated with the genomic bins to generate the raw maps.
 In addition, Hi-C data can contain several sources of biases which has to be
 corrected.
-The current workflow uses the [ìced](https://github.com/hiclib/iced) and
+The HiC-Pro workflow uses the [iced](https://github.com/hiclib/iced) and
 [Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286)
 python package which proposes a fast implementation of the original ICE
 normalization algorithm (Imakaev et al. 2012), making the assumption of equal
 visibility of each fragment.
 
+Importantly, the HiC-Pro maps are generated only if the `--hicpro_maps` option
+is specified on the command line.
+
+**Output directory: `results/hicpro/matrix`**
+
 * `*.matrix` - genome-wide contact maps
 * `*_iced.matrix` - genome-wide iced contact maps
 
-The contact maps are generated for all specified resolution
-(see `--bin_size` argument)
+The contact maps are generated for all specified resolutions
+(see `--bin_size` argument).  
 A contact map is defined by :
 
 * A list of genomic intervals related to the specified resolution (BED format).
@@ -180,6 +193,58 @@ files.
 This format is memory efficient, and is compatible with several software for
 downstream analysis.
 
+## Hi-C contact maps
+
+Contact maps are usually stored as simple txt (`HiC-Pro`), .hic (`Juicer/Juicebox`) and .(m)cool (`cooler/Higlass`) formats.
+Note that .cool and .hic format are compressed and usually much more efficient than the txt format.  
+In the current workflow, we propose to use the `cooler` format as a standard to build the raw and normalized maps
+after valid pairs detection as it is used by several downstream analysis and visualization tools.
+
+Raw contact maps are therefore in **`results/contact_maps/raw`** which contains the different maps in `txt` and `cool` formats, at various resolutions.
+Normalized contact maps are stored in **`results/contact_maps/norm`** which contains the different maps in `txt`, `cool`, and `mcool` format.
+
+Note that `txt` contact maps generated with `cooler` are identical to those generated by `HiC-Pro`.
+However, differences can be observed on the normalized contact maps as the balancing algorithm is not the same.
+
+## Downstream analysis
+
+Downstream analyses are performed from `cool` files at the specified resolutions.
+
+### Distance decay
+
+The distance decay plot shows the relationship between contact frequencies and genomic distance. It gives a good indication of the compaction of the genome.
+According to the organism, the slope of the curve should fit the expectation of polymer physics models.
+
+The results generated with the `HiCExplorer hicPlotDistVsCounts` tool (plot and table) are available in the **`results/dist_decay/`** folder.
+
+### Compartments calling
+
+Compartments calling is one of the most common analyses, aiming at detecting A (open, active) / B (closed, inactive) compartments.
+In the first studies on the subject, the compartments were called at high/medium resolution (1000000 to 250000) which is enough to call A/B compartments.
+Analysis at higher resolution has shown that these two main types of compartments can be further divided into compartment subtypes.
+
+Although different methods have been proposed for compartment calling, the standard remains the eigen vector decomposition from the normalized correlation maps.
+Here, we use the implementation available in the [`cooltools`](https://cooltools.readthedocs.io/en/latest) package.
+
+Results are available in **`results/compartments/`** folder and includes :
+
+* `*cis.vecs.tsv`: eigenvectors decomposition along the genome
+* `*cis.lam.txt`: eigenvalues associated with the eigenvectors
+
+### TADs calling
+
+TADs have been described as functional units of the genome.
+While contacts between genes and regulatory elements can occur within a single TAD, contacts between TADs are much less frequent, mainly due to the presence of insulator proteins (such as CTCF) at their boundaries. Looking at Hi-C maps, TADs look like triangles around the diagonal. According to the contact map resolutions, TADs appear as hierarchical structures with a median size around 1Mb (in mammals), as well as smaller structures usually called sub-TADs of smaller size.
+
+TADs calling remains a challenging task, and even if many methods have been proposed in the last decade, little overlap has been found between their results.
+
+Currently, the pipeline proposes two approaches :
+
+* Insulation score using the [`cooltools`](https://cooltools.readthedocs.io/en/latest/cli.html#cooltools-diamond-insulation) package. Results are available in **`results/tads/insulation`**.
+* [`HiCExplorer TADs calling`](https://hicexplorer.readthedocs.io/en/latest/content/tools/hicFindTADs.html). Results are available at **`results/tads/hicexplorer`**.
+
+Usually, TADs results are presented as simple BED files, or bigWig files, with the position of boundaries along the genome.
+
 ## MultiQC
 
 [MultiQC](http://multiqc.info) is a visualisation tool that generates a single
diff --git a/docs/usage.md b/docs/usage.md
index 4e7946caeb3bdd2e854060472b37c7ac66c310f5..f072ba565b84454879b8f1e9f83cf368f74ae24f 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -65,7 +65,7 @@ fails after three times then the pipeline is stopped.
 Use this parameter to choose a configuration profile. Profiles can give
 configuration presets for different compute environments.
 
-Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Conda) - see below.
+Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below.
 
 > We highly recommend the use of Docker or Singularity containers for full
 pipeline reproducibility, however when this is not possible, Conda is also supported.
@@ -96,8 +96,14 @@ installed and available on the `PATH`. This is _not_ recommended.
 * `podman`
   * A generic configuration profile to be used with [Podman](https://podman.io/)
   * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
+* `shifter`
+  * A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/)
+  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
+* `charliecloud`
+  * A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/)
+  * Pulls software from Docker Hub: [`nfcore/hic`](https://hub.docker.com/r/nfcore/hic/)
 * `conda`
-  * Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity or Podman.
+  * Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud.
   * A generic configuration profile to be used with [Conda](https://conda.io/docs/)
   * Pulls most software from [Bioconda](https://bioconda.github.io/)
 * `test`
@@ -139,8 +145,9 @@ process {
 }
 ```
 
-See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html)
-for more information.
+To find the exact name of a process you wish to modify the compute resources, check the live-status of a nextflow run displayed on your terminal or check the nextflow error for a line like so: `Error executing process > 'bwa'`. In this case the name to specify in the custom config file is `bwa`.
+
+See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information.
 
 If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition above). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
 
@@ -178,7 +185,7 @@ NXF_OPTS='-Xms1g -Xmx4g'
 ### Hi-C digestion protocol
 
 Here is an command line example for standard DpnII digestion protocols.
-Alignment will be performed on the `mm10` genome with default paramters.
+Alignment will be performed on the `mm10` genome with default parameters.
 Multi-hits will not be considered and duplicates will be removed.
 Note that by default, no filters are applied on DNA and restriction fragment sizes.
 
@@ -243,13 +250,13 @@ run the pipeline:
 
 ### `--bwt2_index`
 
-The bowtie2 indexes are required to run the Hi-C pipeline. If the
+The bowtie2 indexes are required to align the data with the HiC-Pro workflow. If the
 `--bwt2_index` is not specified, the pipeline will either use the igenome
 bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly
 (see `--fasta` option)
 
 ```bash
---bwt2_index '[path to bowtie2 index (with basename)]'
+--bwt2_index '[path to bowtie2 index]'
 ```
 
 ### `--chromosome_size`
@@ -298,7 +305,7 @@ file with coordinates of restriction fragments.
 
 If not specified, this file will be automatically created by the pipline.
 In this case, the `--fasta` reference genome will be used.
-Note that the `--restriction_site` parameter is mandatory to create this file.
+Note that the `--digestion` or `--restriction_site` parameter is mandatory to create this file.
 
 ## Hi-C specific options
 
@@ -306,7 +313,7 @@ The following options are defined in the `nextflow.config` file, and can be
 updated either using a custom configuration file (see `-c` option) or using
 command line parameter.
 
-### Reads mapping
+### HiC-pro mapping
 
 The reads mapping is currently based on the two-steps strategy implemented in
 the HiC-pro pipeline. The idea is to first align reads from end-to-end.
@@ -391,6 +398,21 @@ Default: 'AAGCTAGCTT'
 
 Exemple of the ARIMA kit: GATCGATC,GANTGATC,GANTANTC,GATCANTC
 
+### DNAse Hi-C
+
+#### `--dnase`
+
+In DNAse Hi-C mode, all options related to digestion Hi-C
+(see previous section) are ignored.
+In this case, it is highly recommended to use the `--min_cis_dist` parameter
+to remove spurious ligation products.
+
+```bash
+--dnase
+```
+
+### HiC-pro processing
+
 #### `--min_restriction_fragment_size`
 
 Minimum size of restriction fragments to consider for the Hi-C processing.
@@ -427,21 +449,6 @@ Default: '0' - no filter
 --max_insert_size '[numeric]'
 ```
 
-### DNAse Hi-C
-
-#### `--dnase`
-
-In DNAse Hi-C mode, all options related to digestion Hi-C
-(see previous section) are ignored.
-In this case, it is highly recommanded to use the `--min_cis_dist` parameter
-to remove spurious ligation products.
-
-```bash
---dnase'
-```
-
-### Hi-C processing
-
 #### `--min_cis_dist`
 
 Filter short range contact below the specified distance.
@@ -472,16 +479,42 @@ Note that in this case the `--min_mapq` parameter is ignored.
 
 ## Genome-wide contact maps
 
+Once the list of valid pairs is available, the standard is now to move on the `cooler`
+framework to build the raw and balanced contact maps in txt and (m)cool formats.
+
 ### `--bin_size`
 
-Resolution of contact maps to generate (space separated).
-Default:'1000000,500000'
+Resolution of contact maps to generate (comma separated).
+Default:'1000000'
+
+```bash
+--bin_size '[string]'
+```
+
+### `--res_zoomify`
+
+Define the maximum resolution to reach when zoomify the cool contact maps.
+Default:'5000'
+
+```bash
+--res_zoomify '[string]'
+```
+
+### HiC-Pro contact maps
+
+By default, the contact maps are now generated with the `cooler` framework.
+However, for backward compatibility, the raw and normalized maps can still be generated
+by HiC-pro if the `--hicpro_maps` parameter is set.
+
+#### `--hicpro_maps`
+
+If specified, the raw and ICE normalized contact maps will be generated by HiC-Pro.
 
 ```bash
---bins_size '[numeric]'
+--hicpro_maps
 ```
 
-### `--ice_max_iter`
+#### `--ice_max_iter`
 
 Maximum number of iteration for ICE normalization.
 Default: 100
@@ -490,7 +523,7 @@ Default: 100
 --ice_max_iter '[numeric]'
 ```
 
-### `--ice_filer_low_count_perc`
+#### `--ice_filer_low_count_perc`
 
 Define which pourcentage of bins with low counts should be force to zero.
 Default: 0.02
@@ -499,7 +532,7 @@ Default: 0.02
 --ice_filter_low_count_perc '[numeric]'
 ```
 
-### `--ice_filer_high_count_perc`
+#### `--ice_filer_high_count_perc`
 
 Define which pourcentage of bins with low counts should be discarded before
 normalization. Default: 0
@@ -508,7 +541,7 @@ normalization. Default: 0
 --ice_filter_high_count_perc '[numeric]'
 ```
 
-### `--ice_eps`
+#### `--ice_eps`
 
 The relative increment in the results before declaring convergence for ICE
 normalization. Default: 0.1
@@ -517,6 +550,54 @@ normalization. Default: 0.1
 --ice_eps '[numeric]'
 ```
 
+## Downstream analysis
+
+### Additional quality controls
+
+#### `--res_dist_decay`
+
+Generates distance vs Hi-C counts plots at a given resolution using `HiCExplorer`.
+Several resolutions can be specified (comma separated). Default: '250000'
+
+```bash
+--res_dist_decay '[string]'
+```
+
+### Compartment calling
+
+Call open/close compartments for each chromosome, using the `cooltools` command.
+
+#### `--res_compartments`
+
+Resolution to call the chromosome compartments (comma separated).
+Default: '250000'
+
+```bash
+--res_compartments '[string]'
+```
+
+### TADs calling
+
+#### `--tads_caller`
+
+TADs calling can be performed using different approaches.
+Currently available options are `insulation` and `hicexplorer`.
+Note that all options can be specified (comma separated).
+Default: 'insulation'
+
+```bash
+--tads_caller '[string]'
+```
+
+#### `--res_tads`
+
+Resolution to run the TADs calling analysis (comma separated).
+Default: '40000'
+
+```bash
+--res_tads '[string]'
+```
+
 ## Inputs/Outputs
 
 ### `--split_fastq`
@@ -571,13 +652,13 @@ genome-wide maps are not built. Usefult for capture-C analysis. Default: false
 --skip_maps
 ```
 
-### `--skip_ice`
+### `--skip_balancing`
 
-If defined, the ICE normalization is not run on the raw contact maps.
+If defined, the contact maps normalization is not run on the raw contact maps.
 Default: false
 
 ```bash
---skip_ice
+--skip_balancing
 ```
 
 ### `--skip_cool`
@@ -588,6 +669,30 @@ If defined, cooler files are not generated. Default: false
 --skip_cool
 ```
 
+### `--skip_dist_decay`
+
+Do not run distance decay plots. Default: false
+
+```bash
+--skip_dist_decay
+```
+
+### `--skip_compartments`
+
+Do not call compartments. Default: false
+
+```bash
+--skip_compartments
+```
+
+### `--skip_tads`
+
+Do not call TADs. Default: false
+
+```bash
+--skip_tads
+```
+
 ### `--skip_multiQC`
 
 If defined, the MultiQC report is not generated. Default: false
diff --git a/environment.yml b/environment.yml
index 90d9eea12420ed6e6066608aadc55d16120e8dfa..85def0f721d3b200671fe2cc00c383dc5b226b54 100644
--- a/environment.yml
+++ b/environment.yml
@@ -8,6 +8,7 @@ channels:
 dependencies:
   - conda-forge::python=3.7.6
   - pip=20.0.1
+  - conda-forge::tbb=2020.2=hc9558a2_0 
   - conda-forge::scipy=1.4.1
   - conda-forge::numpy=1.18.1
   - bioconda::iced=0.5.6
@@ -26,5 +27,5 @@ dependencies:
   - bioconda::ucsc-bedgraphtobigwig=357
   - conda-forge::cython=0.29.19
   - pip:
-    - cooltools==0.3.2
+    - cooltools==0.4.0
     - fanc==0.8.30
diff --git a/lib/Headers.groovy b/lib/Headers.groovy
new file mode 100644
index 0000000000000000000000000000000000000000..15d1d388006df42e226aea961f0d21dbdabaa8cb
--- /dev/null
+++ b/lib/Headers.groovy
@@ -0,0 +1,43 @@
+/*
+ * This file holds several functions used to render the nf-core ANSI header.
+ */
+
+class Headers {
+
+    private static Map log_colours(Boolean monochrome_logs) {
+        Map colorcodes = [:]
+        colorcodes['reset']       = monochrome_logs ? '' : "\033[0m"
+        colorcodes['dim']         = monochrome_logs ? '' : "\033[2m"
+        colorcodes['black']       = monochrome_logs ? '' : "\033[0;30m"
+        colorcodes['green']       = monochrome_logs ? '' : "\033[0;32m"
+        colorcodes['yellow']      = monochrome_logs ? '' :  "\033[0;33m"
+        colorcodes['yellow_bold'] = monochrome_logs ? '' : "\033[1;93m"
+        colorcodes['blue']        = monochrome_logs ? '' : "\033[0;34m"
+        colorcodes['purple']      = monochrome_logs ? '' : "\033[0;35m"
+        colorcodes['cyan']        = monochrome_logs ? '' : "\033[0;36m"
+        colorcodes['white']       = monochrome_logs ? '' : "\033[0;37m"
+        colorcodes['red']         = monochrome_logs ? '' : "\033[1;91m"
+        return colorcodes
+    }
+
+    static String dashed_line(monochrome_logs) {
+        Map colors = log_colours(monochrome_logs)
+        return "-${colors.dim}----------------------------------------------------${colors.reset}-"
+    }
+
+    static String nf_core(workflow, monochrome_logs) {
+        Map colors = log_colours(monochrome_logs)
+        String.format(
+            """\n
+            ${dashed_line(monochrome_logs)}
+                                                    ${colors.green},--.${colors.black}/${colors.green},-.${colors.reset}
+            ${colors.blue}        ___     __   __   __   ___     ${colors.green}/,-._.--~\'${colors.reset}
+            ${colors.blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${colors.yellow}}  {${colors.reset}
+            ${colors.blue}  | \\| |       \\__, \\__/ |  \\ |___     ${colors.green}\\`-._,-`-,${colors.reset}
+                                                    ${colors.green}`._,._,\'${colors.reset}
+            ${colors.purple}  ${workflow.manifest.name} v${workflow.manifest.version}${colors.reset}
+            ${dashed_line(monochrome_logs)}
+            """.stripIndent()
+        )
+    }
+}
diff --git a/lib/NfcoreSchema.groovy b/lib/NfcoreSchema.groovy
new file mode 100644
index 0000000000000000000000000000000000000000..52ee730432905c5f6dc3e2c89352bbaee6ea145b
--- /dev/null
+++ b/lib/NfcoreSchema.groovy
@@ -0,0 +1,573 @@
+/*
+ * This file holds several functions used to perform JSON parameter validation, help and summary rendering for the nf-core pipeline template.
+ */
+
+import org.everit.json.schema.Schema
+import org.everit.json.schema.loader.SchemaLoader
+import org.everit.json.schema.ValidationException
+import org.json.JSONObject
+import org.json.JSONTokener
+import org.json.JSONArray
+import groovy.json.JsonSlurper
+import groovy.json.JsonBuilder
+
+class NfcoreSchema {
+
+    /*
+    * Function to loop over all parameters defined in schema and check
+    * whether the given parameters adhere to the specifications
+    */
+    /* groovylint-disable-next-line UnusedPrivateMethodParameter */
+    private static void validateParameters(params, jsonSchema, log) {
+        def has_error = false
+        //=====================================================================//
+        // Check for nextflow core params and unexpected params
+        def json = new File(jsonSchema).text
+        def Map schemaParams = (Map) new JsonSlurper().parseText(json).get('definitions')
+        def nf_params = [
+            // Options for base `nextflow` command
+            'bg',
+            'c',
+            'C',
+            'config',
+            'd',
+            'D',
+            'dockerize',
+            'h',
+            'log',
+            'q',
+            'quiet',
+            'syslog',
+            'v',
+            'version',
+
+            // Options for `nextflow run` command
+            'ansi',
+            'ansi-log',
+            'bg',
+            'bucket-dir',
+            'c',
+            'cache',
+            'config',
+            'dsl2',
+            'dump-channels',
+            'dump-hashes',
+            'E',
+            'entry',
+            'latest',
+            'lib',
+            'main-script',
+            'N',
+            'name',
+            'offline',
+            'params-file',
+            'pi',
+            'plugins',
+            'poll-interval',
+            'pool-size',
+            'profile',
+            'ps',
+            'qs',
+            'queue-size',
+            'r',
+            'resume',
+            'revision',
+            'stdin',
+            'stub',
+            'stub-run',
+            'test',
+            'w',
+            'with-charliecloud',
+            'with-conda',
+            'with-dag',
+            'with-docker',
+            'with-mpi',
+            'with-notification',
+            'with-podman',
+            'with-report',
+            'with-singularity',
+            'with-timeline',
+            'with-tower',
+            'with-trace',
+            'with-weblog',
+            'without-docker',
+            'without-podman',
+            'work-dir'
+        ]
+        def unexpectedParams = []
+
+        // Collect expected parameters from the schema
+        def expectedParams = []
+        for (group in schemaParams) {
+            for (p in group.value['properties']) {
+                expectedParams.push(p.key)
+            }
+        }
+
+        for (specifiedParam in params.keySet()) {
+            // nextflow params
+            if (nf_params.contains(specifiedParam)) {
+                log.error "ERROR: You used a core Nextflow option with two hyphens: '--${specifiedParam}'. Please resubmit with '-${specifiedParam}'"
+                has_error = true
+            }
+            // unexpected params
+            def params_ignore = params.schema_ignore_params.split(',') + 'schema_ignore_params'
+            def expectedParamsLowerCase = expectedParams.collect{ it.replace("-", "").toLowerCase() }
+            def specifiedParamLowerCase = specifiedParam.replace("-", "").toLowerCase()
+            if (!expectedParams.contains(specifiedParam) && !params_ignore.contains(specifiedParam) && !expectedParamsLowerCase.contains(specifiedParamLowerCase)) {
+                // Temporarily remove camelCase/camel-case params #1035
+                def unexpectedParamsLowerCase = unexpectedParams.collect{ it.replace("-", "").toLowerCase()}
+                if (!unexpectedParamsLowerCase.contains(specifiedParamLowerCase)){
+                    unexpectedParams.push(specifiedParam)
+                }
+            }
+        }
+
+        //=====================================================================//
+        // Validate parameters against the schema
+        InputStream inputStream = new File(jsonSchema).newInputStream()
+        JSONObject rawSchema = new JSONObject(new JSONTokener(inputStream))
+
+        // Remove anything that's in params.schema_ignore_params
+        rawSchema = removeIgnoredParams(rawSchema, params)
+
+        Schema schema = SchemaLoader.load(rawSchema)
+
+        // Clean the parameters
+        def cleanedParams = cleanParameters(params)
+
+        // Convert to JSONObject
+        def jsonParams = new JsonBuilder(cleanedParams)
+        JSONObject paramsJSON = new JSONObject(jsonParams.toString())
+
+        // Validate
+        try {
+            schema.validate(paramsJSON)
+        } catch (ValidationException e) {
+            println ''
+            log.error 'ERROR: Validation of pipeline parameters failed!'
+            JSONObject exceptionJSON = e.toJSON()
+            printExceptions(exceptionJSON, paramsJSON, log)
+            println ''
+            has_error = true
+        }
+
+        // Check for unexpected parameters
+        if (unexpectedParams.size() > 0) {
+            Map colors = log_colours(params.monochrome_logs)
+            println ''
+            def warn_msg = 'Found unexpected parameters:'
+            for (unexpectedParam in unexpectedParams) {
+                warn_msg = warn_msg + "\n* --${unexpectedParam}: ${params[unexpectedParam].toString()}"
+            }
+            log.warn warn_msg
+            log.info "- ${colors.dim}Ignore this warning: params.schema_ignore_params = \"${unexpectedParams.join(',')}\" ${colors.reset}"
+            println ''
+        }
+
+        if (has_error) {
+            System.exit(1)
+        }
+    }
+
+    // Loop over nested exceptions and print the causingException
+    private static void printExceptions(exJSON, paramsJSON, log) {
+        def causingExceptions = exJSON['causingExceptions']
+        if (causingExceptions.length() == 0) {
+            def m = exJSON['message'] =~ /required key \[([^\]]+)\] not found/
+            // Missing required param
+            if (m.matches()) {
+                log.error "* Missing required parameter: --${m[0][1]}"
+            }
+            // Other base-level error
+            else if (exJSON['pointerToViolation'] == '#') {
+                log.error "* ${exJSON['message']}"
+            }
+            // Error with specific param
+            else {
+                def param = exJSON['pointerToViolation'] - ~/^#\//
+                def param_val = paramsJSON[param].toString()
+                log.error "* --${param}: ${exJSON['message']} (${param_val})"
+            }
+        }
+        for (ex in causingExceptions) {
+            printExceptions(ex, paramsJSON, log)
+        }
+    }
+
+    // Remove an element from a JSONArray
+    private static JSONArray removeElement(jsonArray, element){
+        def list = []
+        int len = jsonArray.length()
+        for (int i=0;i<len;i++){
+            list.add(jsonArray.get(i).toString())
+        }
+        list.remove(element)
+        JSONArray jsArray = new JSONArray(list)
+        return jsArray
+    }
+
+    private static JSONObject removeIgnoredParams(rawSchema, params){
+        // Remove anything that's in params.schema_ignore_params
+        params.schema_ignore_params.split(',').each{ ignore_param ->
+            if(rawSchema.keySet().contains('definitions')){
+                rawSchema.definitions.each { definition ->
+                    for (key in definition.keySet()){
+                        if (definition[key].get("properties").keySet().contains(ignore_param)){
+                            // Remove the param to ignore
+                            definition[key].get("properties").remove(ignore_param)
+                            // If the param was required, change this
+                            if (definition[key].has("required")) {
+                                def cleaned_required = removeElement(definition[key].required, ignore_param)
+                                definition[key].put("required", cleaned_required)
+                            }
+                        }
+                    }
+                }
+            }
+            if(rawSchema.keySet().contains('properties') && rawSchema.get('properties').keySet().contains(ignore_param)) {
+                rawSchema.get("properties").remove(ignore_param)
+            }
+            if(rawSchema.keySet().contains('required') && rawSchema.required.contains(ignore_param)) {
+                def cleaned_required = removeElement(rawSchema.required, ignore_param)
+                rawSchema.put("required", cleaned_required)
+            }
+        }
+        return rawSchema
+    }
+
+    private static Map cleanParameters(params) {
+        def new_params = params.getClass().newInstance(params)
+        for (p in params) {
+            // remove anything evaluating to false
+            if (!p['value']) {
+                new_params.remove(p.key)
+            }
+            // Cast MemoryUnit to String
+            if (p['value'].getClass() == nextflow.util.MemoryUnit) {
+                new_params.replace(p.key, p['value'].toString())
+            }
+            // Cast Duration to String
+            if (p['value'].getClass() == nextflow.util.Duration) {
+                new_params.replace(p.key, p['value'].toString().replaceFirst(/d(?!\S)/, "day"))
+            }
+            // Cast LinkedHashMap to String
+            if (p['value'].getClass() == LinkedHashMap) {
+                new_params.replace(p.key, p['value'].toString())
+            }
+        }
+        return new_params
+    }
+
+    /*
+     * This method tries to read a JSON params file
+     */
+    private static LinkedHashMap params_load(String json_schema) {
+        def params_map = new LinkedHashMap()
+        try {
+            params_map = params_read(json_schema)
+        } catch (Exception e) {
+            println "Could not read parameters settings from JSON. $e"
+            params_map = new LinkedHashMap()
+        }
+        return params_map
+    }
+
+    private static Map log_colours(Boolean monochrome_logs) {
+        Map colorcodes = [:]
+
+        // Reset / Meta
+        colorcodes['reset']       = monochrome_logs ? '' : "\033[0m"
+        colorcodes['bold']        = monochrome_logs ? '' : "\033[1m"
+        colorcodes['dim']         = monochrome_logs ? '' : "\033[2m"
+        colorcodes['underlined']  = monochrome_logs ? '' : "\033[4m"
+        colorcodes['blink']       = monochrome_logs ? '' : "\033[5m"
+        colorcodes['reverse']     = monochrome_logs ? '' : "\033[7m"
+        colorcodes['hidden']      = monochrome_logs ? '' : "\033[8m"
+
+        // Regular Colors
+        colorcodes['black']       = monochrome_logs ? '' : "\033[0;30m"
+        colorcodes['red']         = monochrome_logs ? '' : "\033[0;31m"
+        colorcodes['green']       = monochrome_logs ? '' : "\033[0;32m"
+        colorcodes['yellow']      = monochrome_logs ? '' : "\033[0;33m"
+        colorcodes['blue']        = monochrome_logs ? '' : "\033[0;34m"
+        colorcodes['purple']      = monochrome_logs ? '' : "\033[0;35m"
+        colorcodes['cyan']        = monochrome_logs ? '' : "\033[0;36m"
+        colorcodes['white']       = monochrome_logs ? '' : "\033[0;37m"
+
+        // Bold
+        colorcodes['bblack']      = monochrome_logs ? '' : "\033[1;30m"
+        colorcodes['bred']        = monochrome_logs ? '' : "\033[1;31m"
+        colorcodes['bgreen']      = monochrome_logs ? '' : "\033[1;32m"
+        colorcodes['byellow']     = monochrome_logs ? '' : "\033[1;33m"
+        colorcodes['bblue']       = monochrome_logs ? '' : "\033[1;34m"
+        colorcodes['bpurple']     = monochrome_logs ? '' : "\033[1;35m"
+        colorcodes['bcyan']       = monochrome_logs ? '' : "\033[1;36m"
+        colorcodes['bwhite']      = monochrome_logs ? '' : "\033[1;37m"
+
+        // Underline
+        colorcodes['ublack']      = monochrome_logs ? '' : "\033[4;30m"
+        colorcodes['ured']        = monochrome_logs ? '' : "\033[4;31m"
+        colorcodes['ugreen']      = monochrome_logs ? '' : "\033[4;32m"
+        colorcodes['uyellow']     = monochrome_logs ? '' : "\033[4;33m"
+        colorcodes['ublue']       = monochrome_logs ? '' : "\033[4;34m"
+        colorcodes['upurple']     = monochrome_logs ? '' : "\033[4;35m"
+        colorcodes['ucyan']       = monochrome_logs ? '' : "\033[4;36m"
+        colorcodes['uwhite']      = monochrome_logs ? '' : "\033[4;37m"
+
+        // High Intensity
+        colorcodes['iblack']      = monochrome_logs ? '' : "\033[0;90m"
+        colorcodes['ired']        = monochrome_logs ? '' : "\033[0;91m"
+        colorcodes['igreen']      = monochrome_logs ? '' : "\033[0;92m"
+        colorcodes['iyellow']     = monochrome_logs ? '' : "\033[0;93m"
+        colorcodes['iblue']       = monochrome_logs ? '' : "\033[0;94m"
+        colorcodes['ipurple']     = monochrome_logs ? '' : "\033[0;95m"
+        colorcodes['icyan']       = monochrome_logs ? '' : "\033[0;96m"
+        colorcodes['iwhite']      = monochrome_logs ? '' : "\033[0;97m"
+
+        // Bold High Intensity
+        colorcodes['biblack']     = monochrome_logs ? '' : "\033[1;90m"
+        colorcodes['bired']       = monochrome_logs ? '' : "\033[1;91m"
+        colorcodes['bigreen']     = monochrome_logs ? '' : "\033[1;92m"
+        colorcodes['biyellow']    = monochrome_logs ? '' : "\033[1;93m"
+        colorcodes['biblue']      = monochrome_logs ? '' : "\033[1;94m"
+        colorcodes['bipurple']    = monochrome_logs ? '' : "\033[1;95m"
+        colorcodes['bicyan']      = monochrome_logs ? '' : "\033[1;96m"
+        colorcodes['biwhite']     = monochrome_logs ? '' : "\033[1;97m"
+
+        return colorcodes
+    }
+
+    static String dashed_line(monochrome_logs) {
+        Map colors = log_colours(monochrome_logs)
+        return "-${colors.dim}----------------------------------------------------${colors.reset}-"
+    }
+
+    /*
+    Method to actually read in JSON file using Groovy.
+    Group (as Key), values are all parameters
+        - Parameter1 as Key, Description as Value
+        - Parameter2 as Key, Description as Value
+        ....
+    Group
+        -
+    */
+    private static LinkedHashMap params_read(String json_schema) throws Exception {
+        def json = new File(json_schema).text
+        def Map schema_definitions = (Map) new JsonSlurper().parseText(json).get('definitions')
+        def Map schema_properties = (Map) new JsonSlurper().parseText(json).get('properties')
+        /* Tree looks like this in nf-core schema
+         * definitions <- this is what the first get('definitions') gets us
+             group 1
+               title
+               description
+                 properties
+                   parameter 1
+                     type
+                     description
+                   parameter 2
+                     type
+                     description
+             group 2
+               title
+               description
+                 properties
+                   parameter 1
+                     type
+                     description
+         * properties <- parameters can also be ungrouped, outside of definitions
+            parameter 1
+             type
+             description
+        */
+
+        // Grouped params
+        def params_map = new LinkedHashMap()
+        schema_definitions.each { key, val ->
+            def Map group = schema_definitions."$key".properties // Gets the property object of the group
+            def title = schema_definitions."$key".title
+            def sub_params = new LinkedHashMap()
+            group.each { innerkey, value ->
+                sub_params.put(innerkey, value)
+            }
+            params_map.put(title, sub_params)
+        }
+
+        // Ungrouped params
+        def ungrouped_params = new LinkedHashMap()
+        schema_properties.each { innerkey, value ->
+            ungrouped_params.put(innerkey, value)
+        }
+        params_map.put("Other parameters", ungrouped_params)
+
+        return params_map
+    }
+
+    /*
+     * Get maximum number of characters across all parameter names
+     */
+    private static Integer params_max_chars(params_map) {
+        Integer max_chars = 0
+        for (group in params_map.keySet()) {
+            def group_params = params_map.get(group)  // This gets the parameters of that particular group
+            for (param in group_params.keySet()) {
+                if (param.size() > max_chars) {
+                    max_chars = param.size()
+                }
+            }
+        }
+        return max_chars
+    }
+
+    /*
+     * Beautify parameters for --help
+     */
+    private static String params_help(workflow, params, json_schema, command) {
+        Map colors = log_colours(params.monochrome_logs)
+        Integer num_hidden = 0
+        String output  = ''
+        output        += 'Typical pipeline command:\n\n'
+        output        += "  ${colors.cyan}${command}${colors.reset}\n\n"
+        Map params_map = params_load(json_schema)
+        Integer max_chars  = params_max_chars(params_map) + 1
+        Integer desc_indent = max_chars + 14
+        Integer dec_linewidth = 160 - desc_indent
+        for (group in params_map.keySet()) {
+            Integer num_params = 0
+            String group_output = colors.underlined + colors.bold + group + colors.reset + '\n'
+            def group_params = params_map.get(group)  // This gets the parameters of that particular group
+            for (param in group_params.keySet()) {
+                if (group_params.get(param).hidden && !params.show_hidden_params) {
+                    num_hidden += 1
+                    continue;
+                }
+                def type = '[' + group_params.get(param).type + ']'
+                def description = group_params.get(param).description
+                def defaultValue = group_params.get(param).default ? " [default: " + group_params.get(param).default.toString() + "]" : ''
+                def description_default = description + colors.dim + defaultValue + colors.reset
+                // Wrap long description texts
+                // Loosely based on https://dzone.com/articles/groovy-plain-text-word-wrap
+                if (description_default.length() > dec_linewidth){
+                    List olines = []
+                    String oline = "" // " " * indent
+                    description_default.split(" ").each() { wrd ->
+                        if ((oline.size() + wrd.size()) <= dec_linewidth) {
+                            oline += wrd + " "
+                        } else {
+                            olines += oline
+                            oline = wrd + " "
+                        }
+                    }
+                    olines += oline
+                    description_default = olines.join("\n" + " " * desc_indent)
+                }
+                group_output += "  --" +  param.padRight(max_chars) + colors.dim + type.padRight(10) + colors.reset + description_default + '\n'
+                num_params += 1
+            }
+            group_output += '\n'
+            if (num_params > 0){
+                output += group_output
+            }
+        }
+        output += dashed_line(params.monochrome_logs)
+        if (num_hidden > 0){
+            output += colors.dim + "\n Hiding $num_hidden params, use --show_hidden_params to show.\n" + colors.reset
+            output += dashed_line(params.monochrome_logs)
+        }
+        return output
+    }
+
+    /*
+     * Groovy Map summarising parameters/workflow options used by the pipeline
+     */
+    private static LinkedHashMap params_summary_map(workflow, params, json_schema) {
+        // Get a selection of core Nextflow workflow options
+        def Map workflow_summary = [:]
+        if (workflow.revision) {
+            workflow_summary['revision'] = workflow.revision
+        }
+        workflow_summary['runName']      = workflow.runName
+        if (workflow.containerEngine) {
+            workflow_summary['containerEngine'] = workflow.containerEngine
+        }
+        if (workflow.container) {
+            workflow_summary['container'] = workflow.container
+        }
+        workflow_summary['launchDir']    = workflow.launchDir
+        workflow_summary['workDir']      = workflow.workDir
+        workflow_summary['projectDir']   = workflow.projectDir
+        workflow_summary['userName']     = workflow.userName
+        workflow_summary['profile']      = workflow.profile
+        workflow_summary['configFiles']  = workflow.configFiles.join(', ')
+
+        // Get pipeline parameters defined in JSON Schema
+        def Map params_summary = [:]
+        def blacklist  = ['hostnames']
+        def params_map = params_load(json_schema)
+        for (group in params_map.keySet()) {
+            def sub_params = new LinkedHashMap()
+            def group_params = params_map.get(group)  // This gets the parameters of that particular group
+            for (param in group_params.keySet()) {
+                if (params.containsKey(param) && !blacklist.contains(param)) {
+                    def params_value = params.get(param)
+                    def schema_value = group_params.get(param).default
+                    def param_type   = group_params.get(param).type
+                    if (schema_value != null) {
+                        if (param_type == 'string') {
+                            if (schema_value.contains('$projectDir') || schema_value.contains('${projectDir}')) {
+                                def sub_string = schema_value.replace('\$projectDir', '')
+                                sub_string     = sub_string.replace('\${projectDir}', '')
+                                if (params_value.contains(sub_string)) {
+                                    schema_value = params_value
+                                }
+                            }
+                            if (schema_value.contains('$params.outdir') || schema_value.contains('${params.outdir}')) {
+                                def sub_string = schema_value.replace('\$params.outdir', '')
+                                sub_string     = sub_string.replace('\${params.outdir}', '')
+                                if ("${params.outdir}${sub_string}" == params_value) {
+                                    schema_value = params_value
+                                }
+                            }
+                        }
+                    }
+
+                    // We have a default in the schema, and this isn't it
+                    if (schema_value != null && params_value != schema_value) {
+                        sub_params.put(param, params_value)
+                    }
+                    // No default in the schema, and this isn't empty
+                    else if (schema_value == null && params_value != "" && params_value != null && params_value != false) {
+                        sub_params.put(param, params_value)
+                    }
+                }
+            }
+            params_summary.put(group, sub_params)
+        }
+        return [ 'Core Nextflow options' : workflow_summary ] << params_summary
+    }
+
+    /*
+     * Beautify parameters for summary and return as string
+     */
+    private static String params_summary_log(workflow, params, json_schema) {
+        Map colors = log_colours(params.monochrome_logs)
+        String output  = ''
+        def params_map = params_summary_map(workflow, params, json_schema)
+        def max_chars  = params_max_chars(params_map)
+        for (group in params_map.keySet()) {
+            def group_params = params_map.get(group)  // This gets the parameters of that particular group
+            if (group_params) {
+                output += colors.bold + group + colors.reset + '\n'
+                for (param in group_params.keySet()) {
+                    output += "  " + colors.blue + param.padRight(max_chars) + ": " + colors.green +  group_params.get(param) + colors.reset + '\n'
+                }
+                output += '\n'
+            }
+        }
+        output += dashed_line(params.monochrome_logs)
+        output += colors.dim + "\n Only displaying parameters that differ from defaults.\n" + colors.reset
+        output += dashed_line(params.monochrome_logs)
+        return output
+    }
+
+}
diff --git a/lib/nfcore_external_java_deps.jar b/lib/nfcore_external_java_deps.jar
new file mode 100644
index 0000000000000000000000000000000000000000..805c8bb5e4fd43a12a5891eea5a68788309629b0
Binary files /dev/null and b/lib/nfcore_external_java_deps.jar differ
diff --git a/main.nf b/main.nf
index 382432fd4435b7e9b3c9d1b2600aca8958f8581b..963c260532e62d7fb63101ae6f355147fbf5f89d 100644
--- a/main.nf
+++ b/main.nf
@@ -9,98 +9,28 @@
 ----------------------------------------------------------------------------------------
 */
 
-def helpMessage() {
-    log.info nfcoreHeader()
-    log.info"""
-
-    Usage:
-
-    The typical command for running the pipeline is as follows:
-
-    nextflow run nf-core/hic --input '*_R{1,2}.fastq.gz' -profile docker
-
-    Mandatory arguments:
-      --input [file]                            Path to input data (must be surrounded with quotes)
-      --genome [str]                            Name of iGenomes reference
-      -profile [str]                            Configuration profile to use. Can use multiple (comma separated)
-                                                Available: conda, docker, singularity, awsbatch, test and more.
-
-    References                                  If not specified in the configuration file or you wish to overwrite any of the references.
-      --bwt2_index [file]                       Path to Bowtie2 index
-      --fasta [file]                            Path to Fasta reference
-
-    Digestion Hi-C                              If not specified in the configuration file or you wish to set up specific digestion protocol
-      --digestion [str]                         Digestion Hi-C. Name of restriction enzyme used for digestion pre-configuration. Default: 'hindiii'
-      --ligation_site [str]                     Ligation motifs to trim (comma separated) if not available in --digestion. Default: false
-      --restriction_site [str]                  Cutting motif(s) of restriction enzyme(s) (comma separated) if not available in --digestion. Default: false
-      --chromosome_size [file]                  Path to chromosome size file
-      --restriction_fragments [file]            Path to restriction fragment file (bed)
-      --save_reference [bool]                   Save reference genome to output folder. Default: False
-
-    DNase Hi-C
-      --dnase [bool]                            Run DNase Hi-C mode. All options related to restriction fragments are not considered. Default: False
-      --min_cis_dist [int]                      Minimum intra-chromosomal distance to consider. Default: None 
-
-    Alignments
-      --bwt2_opts_end2end [str]                 Options for bowtie2 end-to-end mappinf (first mapping step). See hic.config for default.
-      --bwt2_opts_trimmed [str]                 Options for bowtie2 mapping after ligation site trimming. See hic.config for default.
-      --min_mapq [int]                          Minimum mapping quality values to consider. Default: 10
-      --keep_multi [bool]                       Keep multi-mapped reads (--min_mapq is ignored). Default: false
-      --keep_dups [bool]                        Keep duplicates. Default: false
-      --save_aligned_intermediates [bool]       Save intermediates alignment files. Default: False
-      --split_fastq [bool]                      Split fastq files in reads chunks to speed up computation. Default: false
-      --fastq_chunks_size [int]                 Size of read chunks if split_fastq is true. Default: 20000000
-
-    Valid Pairs Detection
-      --min_restriction_fragment_size [int]     Minimum size of restriction fragments to consider. Default: None
-      --max_restriction_fragment_size [int]     Maximum size of restriction fragments to consider. Default: None
-      --min_insert_size [int]                   Minimum insert size of mapped reads to consider. Default: None
-      --max_insert_size [int]                   Maximum insert size of mapped reads to consider. Default: None
-      --save_interaction_bam [bool]             Save BAM file with interaction tags (dangling-end, self-circle, etc.). Default: False
-
-    Contact maps
-      --bin_size [str]                          Bin size for contact maps (comma separated). Default: '1000000,500000'
-      --ice_max_iter [int]                      Maximum number of iteration for ICE normalization. Default: 100
-      --ice_filter_low_count_perc [float]       Percentage of low counts columns/rows to filter before ICE normalization. Default: 0.02
-      --ice_filter_high_count_perc [float]      Percentage of high counts columns/rows to filter before ICE normalization. Default: 0
-      --ice_eps [float]                         Convergence criteria for ICE normalization. Default: 0.1
-
-    Workflow
-      --skip_maps [bool]                        Skip generation of contact maps. Useful for capture-C. Default: False
-      --skip_balancing [bool]                   Skip contact maps normalization. Default: False
-      --skip_mcool                              Skip mcool file generation. Default: False
-      --skip_dist_decay                         Skip distance decay quality control. Default: False
-      --skip_tads [bool]                        Skip TADs calling. Default: False
-      --skip_multiqc [bool]                     Skip MultiQC. Default: False
-
-    Other options:
-      --outdir [file]                           The output directory where the results will be saved
-      --publish_dir_mode [str]                  Mode for publishing results in the output directory. Available: symlink, rellink, link, copy, copyNoFollow, move (Default: copy)
-      --email [email]                           Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. Default: None
-      --email_on_fail [email]                   Same as --email, except only send mail if the workflow is not successful
-      --max_multiqc_email_size [str]            Theshold size for MultiQC report to be attached in notification email. If file generated by pipeline exceeds the threshold, it will not be attached (Default: 25MB)
-      -name [str]                               Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. Default: None
-
-    AWSBatch options:
-      --awsqueue [str]                          The AWSBatch JobQueue that needs to be set when running on AWSBatch
-      --awsregion [str]                         The AWS Region for your AWS Batch job to run on
-      --awscli [str]                            Path to the AWS CLI tool
-    """.stripIndent()
+log.info Headers.nf_core(workflow, params.monochrome_logs)
+
+////////////////////////////////////////////////////
+/* --               PRINT HELP                 -- */
+////////////////////////////////////////////////////
+def json_schema = "$projectDir/nextflow_schema.json"
+if (params.help) {
+    def command = "nextflow run nf-core/hic --input '*_R{1,2}.fastq.gz' -profile docker"
+    log.info NfcoreSchema.params_help(workflow, params, json_schema, command)
+    exit 0
 }
 
-/**********************************************************
- * SET UP CONFIGURATION VARIABLES
- */
-
-// Show help message
-if (params.help){
-    helpMessage()
-    exit 0
+////////////////////////////////////////////////////
+/* --         VALIDATE PARAMETERS              -- */
+////////////////////////////////////////////////////
+if (params.validate_params) {
+    NfcoreSchema.validateParameters(params, json_schema, log)
 }
 
 // Check if genome exists in the config file
 if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) {
-    exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(", ")}"
+    exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(', ')}"
 }
 
 if (params.digest && params.digestion && !params.digest.containsKey(params.digestion)) {
@@ -119,22 +49,20 @@ if (!params.dnase && !params.ligation_site) {
 params.bwt2_index = params.genome ? params.genomes[ params.genome ].bowtie2 ?: false : false
 params.fasta = params.genome ? params.genomes[ params.genome ].fasta ?: false : false
 
-// Has the run name been specified by the user?
-// this has the bonus effect of catching both -name and --name
-custom_runName = params.name
-if (!(workflow.runName ==~ /[a-z]+_[a-z]+/)) {
-    custom_runName = workflow.runName
-}
+
+////////////////////////////////////////////////////
+/* --     Collect configuration parameters     -- */
+////////////////////////////////////////////////////
 
 // Check AWS batch settings
 if (workflow.profile.contains('awsbatch')) {
     // AWSBatch sanity checking
-    if (!params.awsqueue || !params.awsregion) exit 1, "Specify correct --awsqueue and --awsregion parameters on AWSBatch!"
+    if (!params.awsqueue || !params.awsregion) exit 1, 'Specify correct --awsqueue and --awsregion parameters on AWSBatch!'
     // Check outdir paths to be S3 buckets if running on AWSBatch
     // related: https://github.com/nextflow-io/nextflow/issues/813
-    if (!params.outdir.startsWith('s3:')) exit 1, "Outdir not on S3 - specify S3 Bucket to run on AWSBatch!"
+    if (!params.outdir.startsWith('s3:')) exit 1, 'Outdir not on S3 - specify S3 Bucket to run on AWSBatch!'
     // Prevent trace files to be stored on S3 since S3 does not support rolling files.
-    if (params.tracedir.startsWith('s3:')) exit 1, "Specify a local tracedir or run without trace! S3 cannot be used for tracefiles."
+    if (params.tracedir.startsWith('s3:')) exit 1, 'Specify a local tracedir or run without trace! S3 cannot be used for tracefiles.'
 }
 
 // Stage config files
@@ -193,23 +121,23 @@ if (params.split_fastq ){
 
 // Reference genome
 if ( params.bwt2_index ){
-   lastPath = params.bwt2_index.lastIndexOf(File.separator)
-   bwt2_dir =  params.bwt2_index.substring(0,lastPath+1)
-   bwt2_base = params.bwt2_index.substring(lastPath+1)
+   //lastPath = params.bwt2_index.lastIndexOf(File.separator)
+   //bwt2_dir =  params.bwt2_index.substring(0,lastPath+1)
+   //bwt2_base = params.bwt2_index.substring(lastPath+1)
 
-   Channel.fromPath( bwt2_dir , checkIfExists: true)
+   Channel.fromPath( params.bwt2_index , checkIfExists: true)
       .ifEmpty { exit 1, "Genome index: Provided index not found: ${params.bwt2_index}" }
       .into { bwt2_index_end2end; bwt2_index_trim }
 
 }
 else if ( params.fasta ) {
-   lastPath = params.fasta.lastIndexOf(File.separator)
-   fasta_base = params.fasta.substring(lastPath+1)
-   bwt2_base = fasta_base.toString() - ~/(\.fa)?(\.fasta)?(\.fas)?(\.fsa)?$/
+   //lastPath = params.fasta.lastIndexOf(File.separator)
+   //fasta_base = params.fasta.substring(lastPath+1)
+   //fasta_base = fasta_base.toString() - ~/(\.fa)?(\.fasta)?(\.fas)?(\.fsa)?$/
 
    Channel.fromPath( params.fasta )
 	.ifEmpty { exit 1, "Genome index: Fasta file not found: ${params.fasta}" }
-        .set { fasta_for_index }
+        .into { fasta_for_index }
 }
 else {
    exit 1, "No reference genome specified!"
@@ -218,7 +146,7 @@ else {
 // Chromosome size
 if ( params.chromosome_size ){
    Channel.fromPath( params.chromosome_size , checkIfExists: true)
-         .into {chrsize; chrsize_build; chrsize_raw; chrsize_balance; chrsize_zoom}
+         .into {chrsize; chrsize_build; chrsize_raw; chrsize_balance; chrsize_zoom; chrsize_compartments}
 }
 else if ( params.fasta ){
    Channel.fromPath( params.fasta )
@@ -278,6 +206,9 @@ if (params.res_dist_decay && !params.skip_dist_decay){
 }
 
 if (params.res_compartments && !params.skip_compartments){
+  Channel.fromPath( params.fasta )
+    .ifEmpty { exit 1, "Compartments calling: Fasta file not found: ${params.fasta}" }
+    .set { fasta_for_compartments }
   Channel.from( "${params.res_compartments}" )
     .splitCsv()
     .flatten()
@@ -285,6 +216,7 @@ if (params.res_compartments && !params.skip_compartments){
     map_res = map_res.concat(comp_bin)
     all_res = all_res + ',' + params.res_compartments
 }else{
+  fasta_for_compartments = Channel.empty()
   comp_res = Channel.create()
   if (!params.skip_compartments){
     log.warn "[nf-core/hic] Hi-C resolution for compartment calling not specified. See --res_compartments" 
@@ -295,15 +227,16 @@ map_res
   .unique()
   .into { map_res_summary; map_res; map_res_cool; map_comp }
 
-/**********************************************************
- * SET UP LOGS
- */
+
+////////////////////////////////////////////////////
+/* --         PRINT PARAMETER SUMMARY          -- */
+////////////////////////////////////////////////////
+log.info NfcoreSchema.params_summary_log(workflow, params, json_schema)
 
 // Header log info
-log.info nfcoreHeader()
 def summary = [:]
-if(workflow.revision) summary['Pipeline Release'] = workflow.revision
-summary['Run Name']         = custom_runName ?: workflow.runName
+if (workflow.revision) summary['Pipeline Release'] = workflow.revision
+summary['Run Name']         = workflow.runName
 summary['Input']            = params.input
 summary['splitFastq']       = params.split_fastq
 if (params.split_fastq)
@@ -347,8 +280,6 @@ if (params.email || params.email_on_fail) {
     summary['E-mail on failure'] = params.email_on_fail
     summary['MultiQC maxsize']   = params.max_multiqc_email_size
 }
-log.info summary.collect { k,v -> "${k.padRight(18)}: $v" }.join("\n")
-log.info "-\033[2m--------------------------------------------------\033[0m-"
 
 // Check the hostnames against configured profiles
 checkHostname()
@@ -375,14 +306,11 @@ Channel.from(summary.collect{ [it.key, it.value] })
 
 process get_software_versions {
     publishDir "${params.outdir}/pipeline_info", mode: params.publish_dir_mode,
-        saveAs: { filename ->
-                      if (filename.indexOf(".csv") > 0) filename
-                      else null
-                }
+        saveAs: { filename -> if (filename.indexOf('.csv') > 0) filename else null }
 
-   output:
-   file 'software_versions_mqc.yaml' into software_versions_yaml
-   file "software_versions.csv"
+    output:
+    file 'software_versions_mqc.yaml' into ch_software_versions_yaml
+    file 'software_versions.csv'
 
    script:
    """
@@ -402,9 +330,9 @@ process get_software_versions {
 
 if(!params.bwt2_index && params.fasta){
     process makeBowtie2Index {
-        tag "$bwt2_base"
+        tag "$fasta_base"
         label 'process_highmem'
-        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+	publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
                    saveAs: { params.save_reference ? it : null }, mode: params.publish_dir_mode
 
         input:
@@ -415,9 +343,10 @@ if(!params.bwt2_index && params.fasta){
 	file "bowtie2_index" into bwt2_index_trim
 
         script:
+        fasta_base = fasta.toString() - ~/(\.fa)?(\.fasta)?(\.fas)?(\.fsa)?$/
         """
         mkdir bowtie2_index
-	bowtie2-build ${fasta} bowtie2_index/${bwt2_base}
+	bowtie2-build ${fasta} bowtie2_index/${fasta_base}
 	"""
       }
  }
@@ -427,14 +356,14 @@ if(!params.chromosome_size && params.fasta){
     process makeChromSize {
         tag "$fasta"
 	label 'process_low'
-        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+	publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
                    saveAs: { params.save_reference ? it : null }, mode: params.publish_dir_mode
 
         input:
         file fasta from fasta_for_chromsize
 
         output:
-        file "*.size" into chrsize, chrsize_build, chrsize_raw, chrsize_balance, chrsize_zoom
+        file "*.size" into chrsize, chrsize_build, chrsize_raw, chrsize_balance, chrsize_zoom, chrsize_compartments
 
         script:
         """
@@ -475,8 +404,8 @@ if(!params.restriction_fragments && params.fasta && !params.dnase){
 process bowtie2_end_to_end {
    tag "$sample"
    label 'process_medium'
-   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/hicpro/mapping" : params.outdir },
-   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping/bwt2_end2end" : params.outdir },
+              saveAs: { filename -> if (params.save_aligned_intermediates) filename }, mode: params.publish_dir_mode
 
    input:
    set val(sample), file(reads) from raw_reads
@@ -491,19 +420,21 @@ process bowtie2_end_to_end {
    def bwt2_opts = params.bwt2_opts_end2end
    if (!params.dnase){
    """
+   INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
    bowtie2 --rg-id BMG --rg SM:${prefix} \\
 	${bwt2_opts} \\
 	-p ${task.cpus} \\
-	-x ${index}/${bwt2_base} \\
+	-x \${INDEX} \\
 	--un ${prefix}_unmap.fastq \\
  	-U ${reads} | samtools view -F 4 -bS - > ${prefix}.bam
    """
    }else{
    """
+   INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
    bowtie2 --rg-id BMG --rg SM:${prefix} \\
 	${bwt2_opts} \\
 	-p ${task.cpus} \\
-	-x ${index}/${bwt2_base} \\
+	-x \${INDEX} \\
 	--un ${prefix}_unmap.fastq \\
  	-U ${reads} > ${prefix}.bam
    """
@@ -513,9 +444,9 @@ process bowtie2_end_to_end {
 process trim_reads {
    tag "$sample"
    label 'process_low'
-   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/hicpro/mapping" : params.outdir },
-   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
-
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping/bwt2_trimmed" : params.outdir },
+              saveAs: { filename -> if (params.save_aligned_intermediates) filename }, mode: params.publish_dir_mode
+              
    when:
    !params.dnase
 
@@ -537,8 +468,8 @@ process trim_reads {
 process bowtie2_on_trimmed_reads {
    tag "$sample"
    label 'process_medium'
-   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/hicpro/mapping" : params.outdir },
-   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping/bwt2_trimmed" : params.outdir },
+   	      saveAs: { filename -> if (params.save_aligned_intermediates) filename }, mode: params.publish_dir_mode
 
    when:
    !params.dnase
@@ -553,10 +484,11 @@ process bowtie2_on_trimmed_reads {
    script:
    prefix = reads.toString() - ~/(_trimmed)?(\.fq)?(\.fastq)?(\.gz)?$/
    """
+   INDEX=`find -L ./ -name "*.rev.1.bt2" | sed 's/.rev.1.bt2//'`
    bowtie2 --rg-id BMG --rg SM:${prefix} \\
            ${params.bwt2_opts_trimmed} \\
            -p ${task.cpus} \\
-           -x ${index}/${bwt2_base} \\
+           -x \${INDEX} \\
            -U ${reads} | samtools view -bS - > ${prefix}_trimmed.bam
    """
 }
@@ -565,8 +497,9 @@ if (!params.dnase){
    process bowtie2_merge_mapping_steps{
       tag "$prefix = $bam1 + $bam2"
       label 'process_medium'
-      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/hicpro/mapping" : params.outdir },
-   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+      publishDir "${params.outdir}/hicpro/mapping", mode: params.publish_dir_mode,
+   	      saveAs: { filename -> if (params.save_aligned_intermediates && filename.endsWith("stat")) "stats/$filename"
+			else if (params.save_aligned_intermediates) filename}
 
       input:
       set val(prefix), file(bam1), file(bam2) from end_to_end_bam.join( trimmed_bam ).dump(tag:'merge')
@@ -576,9 +509,7 @@ if (!params.dnase){
       set val(oname), file("${prefix}.mapstat") into all_mapstat
 
       script:
-      //sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
       sample = prefix.toString() - ~/(_R1|_R2)/
-      //tag = prefix.toString() =~/_R1|_val_1|_1/ ? "R1" : "R2"
       tag = prefix.toString() =~/_R1/ ? "R1" : "R2"
       oname = prefix.toString() - ~/(\.[0-9]+)$/
       """
@@ -587,7 +518,7 @@ if (!params.dnase){
                      ${bam1} ${bam2}
 
       samtools sort -@ ${task.cpus} -m 800M \\
-      	            -n -T /tmp/ \\
+      	            -n  \\
 	            -o ${prefix}_bwt2merged.sorted.bam \\
 	            ${prefix}_bwt2merged.bam
 
@@ -608,8 +539,9 @@ if (!params.dnase){
    process dnase_mapping_stats{
       tag "$sample = $bam"
       label 'process_medium'
-      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/hicpro/mapping" : params.outdir },
-   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+      publishDir "${params.outdir}/hicpro/mapping",  mode: params.publish_dir_mode, 
+   	      saveAs: { filename -> if (params.save_aligned_intermediates && filename.endsWith("stat")) "stats/$filename"
+	                else if (params.save_aligned_intermediates) filename}
 
       input:
       set val(prefix), file(bam) from end_to_end_bam
@@ -641,10 +573,10 @@ process combine_mates{
    tag "$sample = $r1_prefix + $r2_prefix"
    label 'process_low'
    publishDir "${params.outdir}/hicpro/mapping", mode: params.publish_dir_mode,
-   	      saveAs: {filename -> filename.indexOf(".pairstat") > 0 ? "stats/$filename" : "$filename"}
+   	      saveAs: {filename -> filename.endsWith(".pairstat") ? "stats/$filename" : "$filename"}
 
    input:
-   set val(sample), file(aligned_bam) from bwt2_merged_bam.groupTuple().dump(tag:'mates')
+   set val(sample), file(aligned_bam) from bwt2_merged_bam.groupTuple()
 
    output:
    set val(oname), file("${sample}_bwt2pairs.bam") into paired_bam
@@ -677,7 +609,9 @@ if (!params.dnase){
       tag "$sample"
       label 'process_low'
       publishDir "${params.outdir}/hicpro/valid_pairs", mode: params.publish_dir_mode,
-   	      saveAs: {filename -> filename.indexOf(".stat") > 0 ? "stats/$filename" : "$filename"}
+   	      saveAs: {filename -> if (filename.endsWith("RSstat")) "stats/$filename"
+                                   else if (filename.endsWith(".validPairs")) filename
+                                   else if (params.save_nonvalid_pairs) filename}
 
       input:
       set val(sample), file(pe_bam) from paired_bam
@@ -707,7 +641,7 @@ if (!params.dnase){
       prefix = pe_bam.toString() - ~/.bam/
       """
       mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} --all ${opts}
-      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      sort -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
       """
    }
 }
@@ -716,7 +650,8 @@ else{
       tag "$sample"
       label 'process_low'
       publishDir "${params.outdir}/hicpro/valid_pairs", mode: params.publish_dir_mode,
-   	      saveAs: {filename -> filename.indexOf(".stat") > 0 ? "stats/$filename" : "$filename"}
+   	      saveAs: {filename -> if (filename.endsWith("RSstat")) "stats/$filename" 
+                                   else filename}
 
       input:
       set val(sample), file(pe_bam) from paired_bam
@@ -735,7 +670,7 @@ else{
       prefix = pe_bam.toString() - ~/.bam/
       """
       mapped_2hic_dnase.py -r ${pe_bam} ${opts}
-      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      sort -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
       """
    }
 }
@@ -748,14 +683,15 @@ process remove_duplicates {
    tag "$sample"
    label 'process_highmem'
    publishDir "${params.outdir}/hicpro/valid_pairs", mode: params.publish_dir_mode,
-   	      saveAs: {filename -> filename.indexOf(".stat") > 0 ? "stats/$sample/$filename" : "$filename"}
-
+               saveAs: {filename -> if (filename.endsWith("mergestat")) "stats/$filename" 
+                                    else if (filename.endsWith("allValidPairs")) "$filename"}
    input:
-   set val(sample), file(vpairs) from valid_pairs.groupTuple().dump(tag:'final')
+   set val(sample), file(vpairs) from valid_pairs.groupTuple()
 
    output:
    set val(sample), file("*.allValidPairs") into ch_vpairs, ch_vpairs_cool
-   file("stats/") into all_mergestat
+   file("stats/") into mqc_mergestat
+   file("*mergestat") into all_mergestat
 
    script:
    if ( ! params.keep_dups ){
@@ -763,29 +699,35 @@ process remove_duplicates {
    mkdir -p stats/${sample}
 
    ## Sort valid pairs and remove read pairs with same starts (i.e duplicated read pairs)
-   sort -T /tmp/ -S 50% -k2,2V -k3,3n -k5,5V -k6,6n -m ${vpairs} | \
+   sort -S 50% -k2,2V -k3,3n -k5,5V -k6,6n -m ${vpairs} | \
    awk -F"\\t" 'BEGIN{c1=0;c2=0;s1=0;s2=0}(c1!=\$2 || c2!=\$5 || s1!=\$3 || s2!=\$6){print;c1=\$2;c2=\$5;s1=\$3;s2=\$6}' > ${sample}.allValidPairs
 
-   echo -n "valid_interaction\t" > stats/${sample}/${sample}_allValidPairs.mergestat
-   cat ${vpairs} | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
-   echo -n "valid_interaction_rmdup\t" >> stats/${sample}/${sample}_allValidPairs.mergestat
-   cat ${sample}.allValidPairs | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction\t" > ${sample}_allValidPairs.mergestat
+   cat ${vpairs} | wc -l >> ${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction_rmdup\t" >> ${sample}_allValidPairs.mergestat
+   cat ${sample}.allValidPairs | wc -l >> ${sample}_allValidPairs.mergestat
 
    ## Count short range (<20000) vs long range contacts
-   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> stats/${sample}/${sample}_allValidPairs.mergestat
-
+   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> ${sample}_allValidPairs.mergestat
+ 
+   ## For MultiQC
+   mkdir -p stats/${sample} 
+   cp ${sample}_allValidPairs.mergestat stats/${sample}/
    """
    }else{
    """
-   mkdir -p stats/${sample}
    cat ${vpairs} > ${sample}.allValidPairs
-   echo -n "valid_interaction\t" > stats/${sample}/${sample}_allValidPairs.mergestat
-   cat ${vpairs} | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
-   echo -n "valid_interaction_rmdup\t" >> stats/${sample}/${sample}_allValidPairs.mergestat
-   cat ${sample}.allValidPairs | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction\t" > ${sample}_allValidPairs.mergestat
+   cat ${vpairs} | wc -l >> ${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction_rmdup\t" >> ${sample}_allValidPairs.mergestat
+   cat ${sample}.allValidPairs | wc -l >> ${sample}_allValidPairs.mergestat
 
    ## Count short range (<20000) vs long range contacts
-   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> stats/${sample}/${sample}_allValidPairs.mergestat
+   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> ${sample}_allValidPairs.mergestat
+
+   ## For MultiQC
+   mkdir -p stats/${sample}
+   cp ${sample}_allValidPairs.mergestat stats/${sample}/
    """
    }
 }
@@ -793,13 +735,15 @@ process remove_duplicates {
 process merge_stats {
    tag "$ext"
    label 'process_low'
-   publishDir "${params.outdir}/hicpro/stats/${sample}", mode: params.publish_dir_mode
+   publishDir "${params.outdir}/hicpro/", mode: params.publish_dir_mode,
+               saveAs: {filename -> if (filename.endsWith("stat")) "stats/$filename"}
 
    input:
    set val(prefix), file(fstat) from all_mapstat.groupTuple().concat(all_pairstat.groupTuple(), all_rsstat.groupTuple())
 
    output:
-   file("mstats/") into all_mstats
+   file("stats/") into mqc_mstats
+   file("*stat") into all_mstats
 
   script:
   sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
@@ -807,14 +751,15 @@ process merge_stats {
   if ( (fstat =~ /.pairstat/) ){ ext = "mpairstat" }
   if ( (fstat =~ /.RSstat/) ){ ext = "mRSstat" }
   """
-  mkdir -p mstats/${sample}
-  merge_statfiles.py -f ${fstat} > mstats/${sample}/${prefix}.${ext}
+  merge_statfiles.py -f ${fstat} > ${prefix}.${ext}
+  mkdir -p stats/${sample}
+  cp ${prefix}.${ext} stats/${sample}/
   """
 }
 
 /*
  * HiC-Pro build matrix processes
- * ONGOING VALIDATION - TO REPLACED BY COOLER ?
+ * kept for backward compatibility
  */
 
 
@@ -824,7 +769,7 @@ process build_contact_maps{
    publishDir "${params.outdir}/hicpro/matrix/raw", mode: params.publish_dir_mode
 
    when:
-   !params.skip_maps
+   !params.skip_maps && params.hicpro_maps
 
    input:
    set val(sample), file(vpairs), val(mres) from ch_vpairs.combine(map_res)
@@ -845,21 +790,21 @@ process run_ice{
    publishDir "${params.outdir}/hicpro/matrix/iced", mode: params.publish_dir_mode
 
    when:
-   !params.skip_maps && !params.skip_ice
+   !params.skip_maps && !params.skip_balancing && params.hicpro_maps
 
    input:
    set val(sample), val(res), file(rmaps), file(bed) from raw_maps
 
    output:
-   set val(sample), val(res), file("*iced.matrix"), file(bed) into iced_maps_4h5, iced_maps_4cool
-   file ("*.biases") into iced_bias
+   set val(sample), val(res), file("*iced.matrix"), file(bed) into hicpro_iced_maps
+   file ("*.biases") into hicpro_iced_bias
 
    script:
    prefix = rmaps.toString() - ~/(\.matrix)?$/
    """
-   ice --filter_low_counts_perc ${params.ice_filer_low_count_perc} \
+   ice --filter_low_counts_perc ${params.ice_filter_low_count_perc} \
    --results_filename ${prefix}_iced.matrix \
-   --filter_high_counts_perc ${params.ice_filer_high_count_perc} \
+   --filter_high_counts_perc ${params.ice_filter_high_count_perc} \
    --max_iter ${params.ice_max_iter} --eps ${params.ice_eps} --remove-all-zeros-loci --output-bias 1 --verbose 1 ${rmaps}
    """
 }
@@ -897,7 +842,7 @@ process cooler_raw {
   label 'process_medium'
 
   publishDir "${params.outdir}/contact_maps/", mode: 'copy',
-              saveAs: {filename -> filename.indexOf(".cool") > 0 ? "raw/cool/$filename" : "raw/txt/$filename"}
+              saveAs: {filename -> filename.endsWith(".cool") ? "raw/cool/$filename" : "raw/txt/$filename"}
 
   input:
   set val(sample), file(contacts), val(res) from cool_build.combine(map_res_cool)
@@ -920,7 +865,7 @@ process cooler_balance {
   label 'process_medium'
 
   publishDir "${params.outdir}/contact_maps/", mode: 'copy',
-              saveAs: {filename -> filename.indexOf(".cool") > 0 ? "norm/cool/$filename" : "norm/txt/$filename"}
+              saveAs: {filename -> filename.endsWith(".cool") ? "norm/cool/$filename" : "norm/txt/$filename"}
 
   when:
   !params.skip_balancing
@@ -965,31 +910,6 @@ process cooler_zoomify {
 }
 
 
-/*
- * Create h5 file
-
-process convert_to_h5 {
-  tag "$sample"
-  label 'process_medium'
-  publishDir "${params.outdir}/contact_maps/norm/h5", mode: 'copy'
-
-  input:
-  set val(sample), val(res), file(maps)  from norm_cool_maps_h5
-
-  output:
-  set val(sample), val(res), file("*.h5") into h5maps_ddecay, h5maps_ccomp, h5maps_tads
-
-  script:
-  """
-  hicConvertFormat --matrices ${maps} \
-  		   --outFileName ${maps.baseName}.h5 \
-		   --resolution ${res} \
-		   --inputFormat cool \
-		   --outputFormat h5 \
-  """
-}
-*/
-
 /****************************************************
  * DOWNSTREAM ANALYSIS
  */
@@ -1015,7 +935,7 @@ process dist_decay {
   !params.skip_dist_decay
 
   input:
-  set val(sample), val(res), file(h5mat), val(r) from chddecay
+  set val(sample), val(res), file(maps), val(r) from chddecay
   
   output:
   file("*_distcount.txt")
@@ -1024,9 +944,9 @@ process dist_decay {
 
   script:
   """
-  hicPlotDistVsCounts --matrices ${h5mat} \
-                      --plotFile ${h5mat.baseName}_distcount.png \
-  		      --outFileData ${h5mat.baseName}_distcount.txt
+  hicPlotDistVsCounts --matrices ${maps} \
+                      --plotFile ${maps.baseName}_distcount.png \
+  		      --outFileData ${maps.baseName}_distcount.txt
   """
 }
 
@@ -1050,13 +970,18 @@ process compartment_calling {
 
   input:
   set val(sample), val(res), file(cool), val(r) from chcomp
+  file(fasta) from fasta_for_compartments.collect()
+  file(chrsize) from chrsize_compartments.collect()
 
   output:
   file("*compartments*") optional true into out_compartments
 
   script:
   """
+  cooltools genome binnify --all-names ${chrsize} ${res} > genome_bins.txt
+  cooltools genome gc genome_bins.txt ${fasta} > genome_gc.txt 
   cooltools call-compartments --contact-type cis -o ${sample}_compartments ${cool}
+  awk -F"\t" 'NR>1{OFS="\t"; if(\$6==""){\$6=0}; print \$1,\$2,\$3,\$6}' ${sample}_compartments.cis.vecs.tsv | sort -k1,1 -k2,2n > ${sample}_compartments.cis.E1.bedgraph
   """
 }
 
@@ -1135,8 +1060,8 @@ process multiqc {
    input:
    file multiqc_config from ch_multiqc_config
    file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([])
-   file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
-   file ('software_versions/*') from software_versions_yaml
+   file ('input_*/*') from mqc_mstats.concat(mqc_mergestat).collect()
+   file ('software_versions/*') from ch_software_versions_yaml
    file workflow_summary from ch_workflow_summary.collect()
 
    output:
@@ -1144,14 +1069,18 @@ process multiqc {
    file "*_data"
 
    script:
-   rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
-   rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''
+   rtitle = ''
+   rfilename = ''
+   if (!(workflow.runName ==~ /[a-z]+_[a-z]+/)) {
+     rtitle = "--title \"${workflow.runName}\""
+     rfilename = "--filename " + workflow.runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report"
+   }
+   custom_config_file = params.multiqc_config ? "--config $mqc_custom_config" : ''
    """
-   multiqc -f $rtitle $rfilename --config $multiqc_config .
+   multiqc -f $rtitle $rfilename $custom_config_file .
    """
 }
 
-
 /*
  * Output Description HTML
  */
@@ -1163,7 +1092,7 @@ process output_documentation {
     file images from ch_output_docs_images
 
     output:
-    file "results_description.html"
+    file 'results_description.html'
 
     script:
     """
@@ -1184,7 +1113,7 @@ workflow.onComplete {
     }
     def email_fields = [:]
     email_fields['version'] = workflow.manifest.version
-    email_fields['runName'] = custom_runName ?: workflow.runName
+    email_fields['runName'] = workflow.runName
     email_fields['success'] = workflow.success
     email_fields['dateComplete'] = workflow.complete
     email_fields['duration'] = workflow.duration
@@ -1289,28 +1218,9 @@ workflow.onComplete {
     }
 }
 
-
-def nfcoreHeader() {
-    // Log colors ANSI codes
-    c_black = params.monochrome_logs ? '' : "\033[0;30m";
-    c_blue = params.monochrome_logs ? '' : "\033[0;34m";
-    c_cyan = params.monochrome_logs ? '' : "\033[0;36m";
-    c_dim = params.monochrome_logs ? '' : "\033[2m";
-    c_green = params.monochrome_logs ? '' : "\033[0;32m";
-    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
-    c_reset = params.monochrome_logs ? '' : "\033[0m";
-    c_white = params.monochrome_logs ? '' : "\033[0;37m";
-    c_yellow = params.monochrome_logs ? '' : "\033[0;33m";
-
-    return """    -${c_dim}--------------------------------------------------${c_reset}-
-                                            ${c_green},--.${c_black}/${c_green},-.${c_reset}
-    ${c_blue}        ___     __   __   __   ___     ${c_green}/,-._.--~\'${c_reset}
-    ${c_blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${c_yellow}}  {${c_reset}
-    ${c_blue}  | \\| |       \\__, \\__/ |  \\ |___     ${c_green}\\`-._,-`-,${c_reset}
-                                            ${c_green}`._,._,\'${c_reset}
-    ${c_purple}  nf-core/hic v${workflow.manifest.version}${c_reset}
-    -${c_dim}--------------------------------------------------${c_reset}-
-    """.stripIndent()
+workflow.onError {
+    // Print unexpected parameters - easiest is to just rerun validation
+    NfcoreSchema.validateParameters(params, json_schema, log)
 }
 
 def checkHostname() {
@@ -1319,15 +1229,15 @@ def checkHostname() {
     def c_red = params.monochrome_logs ? '' : "\033[1;91m"
     def c_yellow_bold = params.monochrome_logs ? '' : "\033[1;93m"
     if (params.hostnames) {
-        def hostname = "hostname".execute().text.trim()
+        def hostname = 'hostname'.execute().text.trim()
         params.hostnames.each { prof, hnames ->
             hnames.each { hname ->
                 if (hostname.contains(hname) && !workflow.profile.contains(prof)) {
-                    log.error "====================================================\n" +
+                    log.error "${c_red}====================================================${c_reset}\n" +
                             "  ${c_red}WARNING!${c_reset} You are running with `-profile $workflow.profile`\n" +
                             "  but your machine hostname is ${c_white}'$hostname'${c_reset}\n" +
                             "  ${c_yellow_bold}It's highly recommended that you use `-profile $prof${c_reset}`\n" +
-                            "============================================================"
+                            "${c_red}====================================================${c_reset}\n"
                 }
             }
         }
diff --git a/nextflow.config b/nextflow.config
index 6e8eae6adffce6204bfa2353fcc5d27a8a52ae0c..c9fbf5424ba1b4f0ca5d00d54e388d134e390d9e 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -9,7 +9,8 @@
 params {
   // Inputs / outputs
   genome = false
-  input = "data/*{1,2}.fastq.gz"
+  input = null
+  input_paths = null
   outdir = './results'
   genome = false
   input_paths = false
@@ -53,6 +54,7 @@ params {
   max_restriction_fragment_size = 0
   min_insert_size = 0
   max_insert_size = 0
+  save_nonvalid_pairs = false
 
   // Dnase Hi-C
   dnase = false
@@ -61,9 +63,10 @@ params {
   // Contact maps
   bin_size = '1000000'
   res_zoomify = '5000'
+  hicpro_maps = false
   ice_max_iter = 100
-  ice_filer_low_count_perc = 0.02
-  ice_filer_high_count_perc =  0
+  ice_filter_low_count_perc = 0.02
+  ice_filter_high_count_perc =  0
   ice_eps = 0.1
 
   // Downstream Analysis
@@ -74,7 +77,6 @@ params {
 
   // Workflow
   skip_maps = false
-  skip_ice = false
   skip_balancing = false
   skip_mcool = false
   skip_dist_decay = false
@@ -85,14 +87,13 @@ params {
   // Boilerplate options
   publish_dir_mode = 'copy'
   multiqc_config = false
-  name = false
   email = false
   email_on_fail = false
   max_multiqc_email_size = 25.MB
   plaintext_email = false
   monochrome_logs = false
   help = false
-  igenomes_base = 's3://ngi-igenomes/igenomes/'
+  igenomes_base = 's3://ngi-igenomes/igenomes'
   tracedir = "${params.outdir}/pipeline_info"
   igenomes_ignore = false
 
@@ -100,9 +101,13 @@ params {
   custom_config_version = 'master'
   custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
   hostnames = false
+  config_profile_name = null
   config_profile_description = false
   config_profile_contact = false
   config_profile_url = false
+  validate_params = true
+  show_hidden_params = false
+  schema_ignore_params = 'genomes,digest,input_paths'
 
   // Defaults only, expecting to be overwritten
   max_memory = 24.GB
@@ -126,10 +131,21 @@ try {
 
 // Create profiles
 profiles {
-  conda { process.conda = "$projectDir/environment.yml" }
+  conda {
+    docker.enabled = false
+    singularity.enabled = false
+    podman.enabled = false
+    shifter.enabled = false
+    charliecloud.enabled = false
+    process.conda = "$projectDir/environment.yml"
+  }
   debug { process.beforeScript = 'echo $HOSTNAME' }
   docker {
     docker.enabled = true
+    singularity.enabled = false
+    podman.enabled = false
+    shifter.enabled = false
+    charliecloud.enabled = false
     // Avoid this error:
     //   WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.
     // Testing this in nf-core after discussion here https://github.com/nf-core/tools/pull/351
@@ -137,11 +153,33 @@ profiles {
     docker.runOptions = '-u \$(id -u):\$(id -g)'
   }
   singularity {
+    docker.enabled = false
     singularity.enabled = true
+    podman.enabled = false
+    shifter.enabled = false
+    charliecloud.enabled = false
     singularity.autoMounts = true
   }
   podman {
+    singularity.enabled = false
+    docker.enabled = false
     podman.enabled = true
+    shifter.enabled = false
+    charliecloud.enabled = false
+  }
+  shifter {
+    singularity.enabled = false
+    docker.enabled = false
+    podman.enabled = false
+    shifter.enabled = true
+    charliecloud.enabled = false
+  }
+  charliecloud {
+    singularity.enabled = false
+    docker.enabled = false
+    podman.enabled = false
+    shifter.enabled = false
+    charliecloud.enabled = true
   }
   test { includeConfig 'conf/test.config' }
   test_full { includeConfig 'conf/test_full.config' }
@@ -162,21 +200,22 @@ env {
 // Capture exit codes from upstream processes when piping
 process.shell = ['/bin/bash', '-euo', 'pipefail']
 
+def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss')
 timeline {
   enabled = true
-  file = "${params.tracedir}/execution_timeline.html"
+  file = "${params.tracedir}/execution_timeline_${trace_timestamp}.html"
 }
 report {
   enabled = true
-  file = "${params.tracedir}/execution_report.html"
+  file = "${params.tracedir}/execution_report_${trace_timestamp}.html"
 }
 trace {
   enabled = true
-  file = "${params.tracedir}/execution_trace.txt"
+  file = "${params.tracedir}/execution_trace_${trace_timestamp}.txt"
 }
 dag {
   enabled = true
-  file = "${params.tracedir}/pipeline_dag.svg"
+  file = "${params.tracedir}/pipeline_dag_${trace_timestamp}.svg"
 }
 
 manifest {
diff --git a/nextflow_schema.json b/nextflow_schema.json
index 952fb4ab4349d331ad9a8f632db5f43b3ce59f33..a6bead5e83ec99ef88915b82b31d89abd7fba9ef 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -62,7 +62,7 @@
                 "igenomes_base": {
                     "type": "string",
                     "description": "Directory / URL base for iGenomes references.",
-                    "default": "s3://ngi-igenomes/igenomes/",
+                    "default": "s3://ngi-igenomes/igenomes",
                     "fa_icon": "fas fa-cloud-download-alt",
                     "hidden": true
                 },
@@ -118,6 +118,12 @@
                     "description": "If generated by the pipeline save the annotation and indexes in the results directory.",
                     "help_text": "Use this parameter to save all annotations to your results folder. These can then be used for future pipeline runs, reducing processing times.",
                     "fa_icon": "fas fa-save"
+                },
+                "save_nonvalid_pairs": {
+                    "type": "boolean",
+                    "description": "Save the non valid pairs detected by HiC-Pro.",
+                    "help_text": "Use this parameter to save non valid pairs detected by HiC-Pro (dangling-end, self-circle, re-ligation, filtered).",
+                    "fa_icon": "fas fa-save"
                 }
             }
         },
@@ -133,7 +139,6 @@
                 },
                 "min_cis_dist": {
                     "type": "integer",
-                    "default": "O",
                     "description": "Minimum distance between loci to consider. Useful for --dnase mode to remove spurious ligation products. Only values > 0 are considered"
                 }
             }
@@ -148,28 +153,27 @@
                 "split_fastq": {
                     "type": "boolean",
                     "description": "Split the reads into chunks before running the pipelne",
-                    "fa_icon": "fas fa-dna",
-                    "default": "false"
+                    "fa_icon": "fas fa-dna"
                 },
                 "fastq_chunks_size": {
                     "type": "integer",
                     "description": "Read number per chunks if split_fastq is used",
-                    "default": "20000000"
+                    "default": 20000000
                 },
                 "min_mapq": {
                     "type": "integer",
-                    "default": "10",
+                    "default": 10,
                     "description": "Keep aligned reads with a minimum quality value"
                 },
                 "bwt2_opts_end2end": {
                     "type": "string",
                     "default": "'--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
-                    "description": "Option for end-to-end bowtie mapping"
+                    "description": "Option for HiC-Pro end-to-end bowtie mapping"
                 },
                 "bwt2_opts_trimmed": {
                     "type": "string",
                     "default": "'--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
-                    "description": "Option for trimmed reads mapping"
+                    "description": "Option for HiC-Pro trimmed reads mapping"
                 },
                 "save_aligned_intermediates": {
                     "type": "boolean",
@@ -185,33 +189,27 @@
             "fa_icon": "fas fa-signature",
             "properties": {
                 "keep_dups": {
-                    "type": "string",
-                    "description": "Keep duplicated reads",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Keep duplicated reads"
                 },
                 "keep_multi": {
-                    "type": "string",
-                    "description": "Keep multi-aligned reads",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Keep multi-aligned reads"
                 },
                 "max_insert_size": {
                     "type": "integer",
-                    "default": "0",
                     "description": "Maximum fragment size to consider. Only values > 0 are considered"
                 },
                 "min_insert_size": {
                     "type": "integer",
-                    "default": "0",
                     "description": "Minimum fragment size to consider. Only values > 0 are considered"
                 },
                 "max_restriction_fragment_size": {
                     "type": "integer",
-                    "default": "0",
                     "description": "Maximum restriction fragment size to consider. Only values > 0 are considered"
                 },
                 "min_restriction_fragment_size": {
                     "type": "integer",
-                    "default": "0",
                     "description": "Minimum restriction fragment size to consider. Only values > 0 are considered"
                 },
                 "save_interaction_bam": {
@@ -220,7 +218,7 @@
                 }
             }
         },
-        "contact_maps_options": {
+        "contact_maps": {
             "title": "Contact maps",
             "type": "object",
             "description": "Options to build Hi-C contact maps",
@@ -232,29 +230,32 @@
                     "default": "'1000000,500000'",
                     "description": "Resolution to build the maps (comma separated)"
                 },
-                "ice_filer_low_count_perc": {
-                    "type": "string",
+                "hicpro_maps": {
+                    "type": "boolean",
+                    "description": "Generate raw and normalized contact maps with HiC-Pro"
+                },
+                "ice_filter_low_count_perc": {
+                    "type": "number",
                     "default": 0.02,
-                    "description": "Filter low counts rows before normalization"
+                    "description": "Filter low counts rows before HiC-Pro normalization"
                 },
-                "ice_filer_high_count_perc": {
+                "ice_filter_high_count_perc": {
                     "type": "integer",
-                    "default": "0",
-                    "description": "Filter high counts rows before normalization"
+                    "description": "Filter high counts rows before HiC-Pro normalization"
                 },
                 "ice_eps": {
-                    "type": "string",
-                    "default": "0.1",
-                    "description": "Threshold for ICE convergence"
+                    "type": "number",
+                    "default": 0.1,
+                    "description": "Threshold for HiC-Pro ICE convergence"
                 },
                 "ice_max_iter": {
                     "type": "integer",
-                    "default": "100",
-                    "description": "Maximum number of iteraction for ICE normalization"
+                    "default": 100,
+                    "description": "Maximum number of iteraction for HiC-Pro ICE normalization"
                 },
                 "res_zoomify": {
-                    "type": "integer",
-                    "default": 5000,
+                    "type": "string",
+                    "default": "5000",
                     "description": "Maximum resolution to build mcool file"
                 }
             }
@@ -266,8 +267,8 @@
             "default": "",
             "properties": {
                 "res_dist_decay": {
-                    "type": "integer",
-                    "default": 1000000,
+                    "type": "string",
+                    "default": "1000000",
                     "description": "Resolution to build count/distance plot"
                 },
                 "tads_caller": {
@@ -281,8 +282,8 @@
                     "description": "Resolution to run TADs callers (comma separated)"
                 },
                 "res_compartments": {
-                    "type": "integer",
-                    "default": 250000,
+                    "type": "string",
+                    "default": "250000",
                     "description": "Resolution for compartments calling"
                 }
             }
@@ -298,34 +299,25 @@
                     "type": "boolean",
                     "description": "Do not build contact maps"
                 },
-                "skip_ice": {
-                    "type": "string",
-                    "description": "Do not run ICE normalization",
-                    "default": "False"
-                },
                 "skip_dist_decay": {
-                    "type": "string",
-                    "description": "Do not run distance/decay plot",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Do not run distance/decay plot"
                 },
                 "skip_tads": {
-                    "type": "string",
-                    "description": "Do not run TADs calling",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Do not run TADs calling"
                 },
                 "skip_compartments": {
                     "type": "string",
                     "description": "Do not run compartments calling"
                 },
                 "skip_balancing": {
-                    "type": "string",
-                    "description": "Do not run cooler balancing normalization",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Do not run cooler balancing normalization"
                 },
                 "skip_mcool": {
-                    "type": "string",
-                    "description": "Do not generate mcool file for Higlass visualization",
-                    "default": "False"
+                    "type": "boolean",
+                    "description": "Do not generate mcool file for Higlass visualization"
                 },
                 "skip_multiqc": {
                     "type": "boolean",
@@ -362,12 +354,12 @@
                         "move"
                     ]
                 },
-                "name": {
-                    "type": "string",
-                    "description": "Workflow name.",
-                    "fa_icon": "fas fa-fingerprint",
-                    "hidden": true,
-                    "help_text": "A custom name for the pipeline run. Unlike the core nextflow `-name` option with one hyphen this parameter can be reused multiple times, for example if using `-resume`. Passed through to steps such as MultiQC and used for things like report filenames and titles."
+                "validate_params": {
+                    "type": "boolean",
+                    "description": "Boolean whether to validate parameters against the schema at runtime",
+                    "default": true,
+                    "fa_icon": "fas fa-check-square",
+                    "hidden": true
                 },
                 "email_on_fail": {
                     "type": "string",
@@ -411,6 +403,13 @@
                     "default": "${params.outdir}/pipeline_info",
                     "fa_icon": "fas fa-cogs",
                     "hidden": true
+                },
+                "show_hidden_params": {
+                    "type": "boolean",
+                    "fa_icon": "far fa-eye-slash",
+                    "description": "Show all params when using `--help`",
+                    "hidden": true,
+                    "help_text": "By default, parameters set as _hidden_ in the schema are not shown on the command line when a user runs with `--help`. Specifying this option will tell the pipeline to show all parameters."
                 }
             }
         },
@@ -434,6 +433,7 @@
                     "description": "Maximum amount of memory that can be requested for any single job.",
                     "default": "128.GB",
                     "fa_icon": "fas fa-memory",
+                    "pattern": "^\\d+(\\.\\d+)?\\.?\\s*(K|M|G|T)?B$",
                     "hidden": true,
                     "help_text": "Use to set an upper-limit for the memory requirement for each process. Should be a string in the format integer-unit e.g. `--max_memory '8.GB'`"
                 },
@@ -442,6 +442,7 @@
                     "description": "Maximum amount of time that can be requested for any single job.",
                     "default": "240.h",
                     "fa_icon": "far fa-clock",
+                    "pattern": "^(\\d+\\.?\\s*(s|m|h|day)\\s*)+$",
                     "hidden": true,
                     "help_text": "Use to set an upper-limit for the time requirement for each process. Should be a string in the format integer-unit e.g. `--max_time '2.h'`"
                 }
@@ -476,6 +477,11 @@
                     "hidden": true,
                     "fa_icon": "fas fa-users-cog"
                 },
+                "config_profile_name": {
+                    "type": "string",
+                    "description": "Institutional config name",
+                    "hidden": true
+                },
                 "config_profile_description": {
                     "type": "string",
                     "description": "Institutional config description.",
@@ -517,7 +523,7 @@
             "$ref": "#/definitions/valid_pairs_detection"
         },
         {
-            "$ref": "#/definitions/contact_maps_options"
+            "$ref": "#/definitions/contact_maps"
         },
         {
             "$ref": "#/definitions/downstream_analysis"
@@ -535,4 +541,4 @@
             "$ref": "#/definitions/institutional_config_options"
         }
     ]
-}
\ No newline at end of file
+}