diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index b88d0f43654c3a170c0cf930e212fa316dda28e2..929f493c0b09f80151a8816b167840ea8eca9f30 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,47 +1,61 @@
 # nf-core/hic: Contributing Guidelines
 
-Hi there! Many thanks for taking an interest in improving nf-core/hic.
+Hi there!
+Many thanks for taking an interest in improving nf-core/hic.
 
-We try to manage the required tasks for nf-core/hic using GitHub issues, you probably came to this page when creating one. Please use the pre-filled template to save time.
+We try to manage the required tasks for nf-core/hic using GitHub issues; you probably came to this page when creating one.
+Please use the pre-filled template to save time.
 
-However, don't be put off by this template - other more general issues and suggestions are welcome! Contributions to the code are even more welcome ;)
-
-> If you need help using or modifying nf-core/hic then the best place to ask is on the pipeline channel on [Slack](https://nf-core-invite.herokuapp.com/).
+However, don't be put off by this template - other more general issues and suggestions are welcome!
+Contributions to the code are even more welcome ;)
 
+> If you need help using or modifying nf-core/hic then the best place to ask is on the nf-core
+Slack [#hic](https://nfcore.slack.com/channels/hic) channel ([join our Slack here](https://nf-co.re/join/slack)).
 
 
 ## Contribution workflow
-If you'd like to write some code for nf-core/hic, the standard workflow
-is as follows:
 
-1. Check that there isn't already an issue about your idea in the
-   [nf-core/hic issues](https://github.com/nf-core/hic/issues) to avoid
-   duplicating work.
+If you'd like to write some code for nf-core/hic, the standard workflow is as follows:
+
+1. Check that there isn't already an issue about your idea in the [nf-core/hic issues](https://github.com/nf-core/hic/issues) to avoid duplicating work
     * If there isn't one already, please create one so that others know you're working on this
-2. Fork the [nf-core/hic repository](https://github.com/nf-core/hic) to your GitHub account
+2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/hic repository](https://github.com/nf-core/hic) to your GitHub account
 3. Make the necessary changes / additions within your forked repository
-4. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged.
-
-If you're not used to this workflow with git, you can start with some [basic docs from GitHub](https://help.github.com/articles/fork-a-repo/) or even their [excellent interactive tutorial](https://try.github.io/).
+4. Submit a Pull Request against the `dev` branch and wait for the code to be reviewed and merged
 
+If you're not used to this workflow with git, you can start with some [docs from GitHub](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests) or even their [excellent `git` resources](https://try.github.io/).
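+
+If it helps, here is a minimal command-line sketch of that workflow
+(`<your-username>` and the branch name `fix-my-issue` are placeholders):
+
+```bash
+# clone your fork and create a feature branch based on `dev`
+git clone https://github.com/<your-username>/hic.git
+cd hic
+git checkout -b fix-my-issue origin/dev
+# ...make and commit your changes...
+git push origin fix-my-issue
+# then open a Pull Request against the nf-core/hic `dev` branch on GitHub
+```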
 
 ## Tests
-When you create a pull request with changes, [Travis CI](https://travis-ci.org/) will run automatic tests.
+
+When you create a pull request with changes, [GitHub Actions](https://github.com/features/actions) will run automatic tests.
 Typically, pull requests are only fully reviewed when these tests are passing, though of course we can help out before then.
 
 There are typically two types of tests that run:
 
 ### Lint Tests
-The nf-core has a [set of guidelines](http://nf-co.re/guidelines) which all pipelines must adhere to.
+
+`nf-core` has a [set of guidelines](https://nf-co.re/developers/guidelines) which all pipelines must adhere to.
 To enforce these and ensure that all pipelines stay in sync, we have developed a helper tool which runs checks on the pipeline code. This is in the [nf-core/tools repository](https://github.com/nf-core/tools) and once installed can be run locally with the `nf-core lint <pipeline-directory>` command.
 
 If any failures or warnings are encountered, please follow the listed URL for more documentation.
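+
+A minimal local run of the same checks (assuming a working Python
+environment; `pip install nf-core` matches the install step used in the
+linting workflow below):
+
+```bash
+pip install nf-core
+nf-core lint .
+```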
 
 ### Pipeline Tests
-Each nf-core pipeline should be set up with a minimal set of test-data.
-Travis CI then runs the pipeline on this data to ensure that it exists successfully.
+
+Each `nf-core` pipeline should be set up with a minimal set of test-data.
+`GitHub Actions` then runs the pipeline on this data to ensure that it exits successfully.
 If there are any failures then the automated tests fail.
-These tests are run both with the latest available version of Nextflow and also the minimum required version that is stated in the pipeline code.
+These tests are run both with the latest available version of `Nextflow` and also the minimum required version that is stated in the pipeline code.
+
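+You can run the same test profile locally; the minimum `Nextflow` version
+pinned in the CI matrix is `19.10.0`:
+
+```bash
+NXF_VER=19.10.0 nextflow run . -profile test,docker
+```
+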
+## Patch
+
+:warning: Only in the unlikely and regretful event of a release happening with a bug.
+
+* On your own fork, make a new branch `patch` based on `upstream/master`.
+* Fix the bug, and bump version (X.Y.Z+1).
+* A PR should be made on `master` from `patch` to directly address this particular bug (see the command-line sketch below).
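+
+A minimal sketch of those steps (assuming your fork is the `origin` remote
+and the nf-core repository is configured as `upstream`):
+
+```bash
+git fetch upstream
+git checkout -b patch upstream/master
+# ...fix the bug and bump the version, typically in `nextflow.config`...
+git push origin patch
+```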
 
 ## Getting help
-For further information/help, please consult the [nf-core/hic documentation](https://github.com/nf-core/hic#documentation) and don't hesitate to get in touch on the pipeline channel on [Slack](https://nf-core-invite.herokuapp.com/).
+For further information/help, please consult the [nf-core/hic documentation](https://nf-co.re/nf-core/hic/docs) and
+don't hesitate to get in touch on the nf-core Slack [#hic](https://nfcore.slack.com/channels/hic) channel
+([join our Slack here](https://nf-co.re/join/slack)).
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 8112c95f4b2e8d2c3aeda5b539529608fe9213e8..2b9203377a6365822d0f13f6a59f2496ae717fb1 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,31 +1,42 @@
+# nf-core/hic bug report
+
 Hi there!
 
-Thanks for telling us about a problem with the pipeline. Please delete this text and anything that's not relevant from the template below:
+Thanks for telling us about a problem with the pipeline.
+Please delete this text and anything that's not relevant from the template below:
+
+## Describe the bug
 
-#### Describe the bug
 A clear and concise description of what the bug is.
 
-#### Steps to reproduce
+## Steps to reproduce
+
 Steps to reproduce the behaviour:
+
 1. Command line: `nextflow run ...`
 2. See error: _Please provide your error message_
 
-#### Expected behaviour
+## Expected behaviour
+
 A clear and concise description of what you expected to happen.
 
-#### System:
- - Hardware: [e.g. HPC, Desktop, Cloud...]
- - Executor: [e.g. slurm, local, awsbatch...]
- - OS: [e.g. CentOS Linux, macOS, Linux Mint...]
- - Version [e.g. 7, 10.13.6, 18.3...]
+## System
+
+- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->
+- Executor: <!-- [e.g. slurm, local, awsbatch...] -->
+- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->
+- Version: <!-- [e.g. 7, 10.13.6, 18.3...] -->
+
+## Nextflow Installation
+
+- Version: <!-- [e.g. 19.10.0] -->
+
+## Container engine
 
-#### Nextflow Installation:
- - Version: [e.g. 0.31.0]
+- Engine: <!-- [e.g. Conda, Docker or Singularity] -->
+- Version: <!-- [e.g. 1.0.0] -->
+- Image tag: <!-- [e.g. nfcore/hic:1.0.0] -->
 
-#### Container engine:
- - Engine: [e.g. Conda, Docker or Singularity]
- - version: [e.g. 1.0.0]
- - Image tag: [e.g. nfcore/hic:1.0.0]
+## Additional context
 
-#### Additional context
 Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 1f025b779cf127b420c972c1e385e4efcdd56321..57fa7f7f41368f73726974ef548162c957b7fd7d 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -1,16 +1,24 @@
+# nf-core/hic feature request
+
 Hi there!
 
-Thanks for suggesting a new feature for the pipeline! Please delete this text and anything that's not relevant from the template below:
+Thanks for suggesting a new feature for the pipeline!
+Please delete this text and anything that's not relevant from the template below:
+
+## Is your feature request related to a problem? Please describe
 
-#### Is your feature request related to a problem? Please describe.
 A clear and concise description of what the problem is.
+
 Ex. I'm always frustrated when [...]
 
-#### Describe the solution you'd like
+## Describe the solution you'd like
+
 A clear and concise description of what you want to happen.
 
-#### Describe alternatives you've considered
+## Describe alternatives you've considered
+
 A clear and concise description of any alternative solutions or features you've considered.
 
-#### Additional context
+## Additional context
+
 Add any other context about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 473c41dcc78813e0ed7867d92322be210da06b0c..50d7959aa9d49a9bc51a14c172917c904d2bafb9 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,15 +1,19 @@
-Many thanks to contributing to nf-core/hic!
+# nf-core/hic pull request
 
-Please fill in the appropriate checklist below (delete whatever is not relevant). These are the most common things requested on pull requests (PRs).
+Many thanks for contributing to nf-core/hic!
+
+Please fill in the appropriate checklist below (delete whatever is not relevant).
+These are the most common things requested on pull requests (PRs).
 
 ## PR checklist
- - [ ] This comment contains a description of changes (with reason)
- - [ ] If you've fixed a bug or added code that should be tested, add tests!
- - [ ] If necessary, also make a PR on the [nf-core/hic branch on the nf-core/test-datasets repo]( https://github.com/nf-core/test-datasets/pull/new/nf-core/hic)
- - [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`).
- - [ ] Make sure your code lints (`nf-core lint .`).
- - [ ] Documentation in `docs` is updated
- - [ ] `CHANGELOG.md` is updated
- - [ ] `README.md` is updated
-
-**Learn more about contributing:** https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md
+
+- [ ] This comment contains a description of changes (with reason)
+- [ ] If you've fixed a bug or added code that should be tested, add tests!
+- [ ] If necessary, also make a PR on the [nf-core/hic branch on the nf-core/test-datasets repo](https://github.com/nf-core/test-datasets/pull/new/nf-core/hic)
+- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker`).
+- [ ] Make sure your code lints (`nf-core lint .`).
+- [ ] Documentation in `docs` is updated
+- [ ] `CHANGELOG.md` is updated
+- [ ] `README.md` is updated
+
+**Learn more about contributing:** [CONTRIBUTING.md](https://github.com/nf-core/hic/tree/master/.github/CONTRIBUTING.md)
\ No newline at end of file
diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml
index e052a635aa7c2787e741207a069d9a400358ca6c..96b12a70398f6870ef306f4d8a5afcebc8f96ba8 100644
--- a/.github/markdownlint.yml
+++ b/.github/markdownlint.yml
@@ -1,9 +1,5 @@
 # Markdownlint configuration file
 default: true
 line-length: false
-no-multiple-blanks: 0
-blanks-around-headers: false
-blanks-around-lists: false
-header-increment: false
 no-duplicate-header:
     siblings_only: true
diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e95804c7cb51f306a7b2bf2028149c64358af705
--- /dev/null
+++ b/.github/workflows/branch.yml
@@ -0,0 +1,16 @@
+name: nf-core branch protection
+# This workflow is triggered on PRs to master branch on the repository
+# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev`
+on:
+  pull_request:
+    branches:
+    - master
+
+jobs:
+  test:
+    runs-on: ubuntu-18.04
+    steps:
+      # PRs are only ok if coming from an nf-core `dev` branch or a fork `patch` branch
+      - name: Check PRs
+        run: |
+          { [[ $(git remote get-url origin) == *nf-core/hic ]] && [[ ${GITHUB_HEAD_REF} = "dev" ]]; } || [[ ${GITHUB_HEAD_REF} == "patch" ]]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a7f734daefe5cd9b35292fd669e0ce404ecf397e
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,30 @@
+name: nf-core CI
+# This workflow is triggered on pushes and PRs to the repository.
+# It runs the pipeline with the minimal test dataset to check that it completes without any syntax errors
+on: [push, pull_request]
+
+jobs:
+  test:
+    env:
+      NXF_VER: ${{ matrix.nxf_ver }}
+      NXF_ANSI_LOG: false
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Nextflow versions: check pipeline minimum and current latest
+        nxf_ver: ['19.10.0', '']
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Nextflow
+        run: |
+          wget -qO- get.nextflow.io | bash
+          sudo mv nextflow /usr/local/bin/
+      - name: Pull docker image
+        run: |
+          docker pull nfcore/hic:dev
+          docker tag nfcore/hic:dev nfcore/hic:1.2.0
+      - name: Run pipeline with test data
+        run: |
+          # nf-core: You can customise CI pipeline run tests as required
+          # (eg. adding multiple test runs with different parameters)
+          nextflow run ${GITHUB_WORKSPACE} -profile test,docker
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1e0827a800dcd520582e8f89d2325cbce15a6b12
--- /dev/null
+++ b/.github/workflows/linting.yml
@@ -0,0 +1,50 @@
+name: nf-core linting
+# This workflow is triggered on pushes and PRs to the repository.
+# It runs the `nf-core lint` and markdown lint tests to ensure that the code meets the nf-core guidelines
+on:
+  push:
+  pull_request:
+  release:
+    types: [published]
+
+jobs:
+  Markdown:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v1
+        with:
+          node-version: '10'
+      - name: Install markdownlint
+        run: npm install -g markdownlint-cli
+      - name: Run Markdownlint
+        run: markdownlint ${GITHUB_WORKSPACE} -c ${GITHUB_WORKSPACE}/.github/markdownlint.yml
+  YAML:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/setup-node@v1
+        with:
+          node-version: '10'
+      - name: Install yaml-lint
+        run: npm install -g yaml-lint
+      - name: Run yaml-lint
+        run: yamllint $(find ${GITHUB_WORKSPACE} -type f -name "*.yml")
+  nf-core:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Nextflow
+        run: |
+          wget -qO- get.nextflow.io | bash
+          sudo mv nextflow /usr/local/bin/
+      - uses: actions/setup-python@v1
+        with:
+          python-version: '3.6'
+          architecture: 'x64'
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install nf-core
+      - name: Run nf-core lint
+        run: nf-core lint ${GITHUB_WORKSPACE}
diff --git a/.gitignore b/.gitignore
index 5b54e3e6c257de1e963395161372e1a2ca110fe7..6354f3708fa7c35477f398801673e469c12726ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,6 @@ work/
 data/
 results/
 .DS_Store
-tests/test_data
+tests/
+testing/
 *.pyc
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index b3e7a99f81be8e2ce96c6be8f54801a093e76cc4..0000000000000000000000000000000000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-sudo: required
-language: python
-jdk: openjdk8
-services: docker
-python: '3.6'
-cache: pip
-matrix:
-  fast_finish: true
-
-before_install:
-  # PRs to master are only ok if coming from dev branch
-  - '[ $TRAVIS_PULL_REQUEST = "false" ] || [ $TRAVIS_BRANCH != "master" ] || ([ $TRAVIS_PULL_REQUEST_SLUG = $TRAVIS_REPO_SLUG ] && [ $TRAVIS_PULL_REQUEST_BRANCH = "dev" ])'
-  # Pull the docker image first so the test doesn't wait for this
-  - docker pull nfcore/hic:dev
-  # Fake the tag locally so that the pipeline runs properly
-  # Looks weird when this is :dev to :dev, but makes sense when testing code for a release (:dev to :1.0.1)
-  - docker tag nfcore/hic:dev nfcore/hic:1.0.0
-
-install:
-  # Install Nextflow
-  - mkdir /tmp/nextflow && cd /tmp/nextflow
-  - wget -qO- get.nextflow.io | bash
-  - sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow
-  # Install nf-core/tools
-  - pip install --upgrade pip
-  - pip install nf-core
-  # Reset
-  - mkdir ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests
-  # Install markdownlint-cli
-  - sudo apt-get install npm && npm install -g markdownlint-cli
-
-env:
-  - NXF_VER='0.32.0' # Specify a minimum NF version that should be tested and work
-  - NXF_VER='' # Plus: get the latest NF version and check that it works
-
-script:
-  # Lint the pipeline code
-  - nf-core lint ${TRAVIS_BUILD_DIR}
-  # Lint the documentation
-  - markdownlint ${TRAVIS_BUILD_DIR} -c ${TRAVIS_BUILD_DIR}/.github/markdownlint.yml
-  # Run the pipeline with the test profile
-  - nextflow run ${TRAVIS_BUILD_DIR} -profile test,docker
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 08f3ef05433140b43ec55ce8bc9b67298f5dc710..685212830b29d24e0c02269c104f83284d4994c1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,14 +1,90 @@
 # nf-core/hic: Changelog
 
-## v1.0 - 2019-05-06
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
-First version of nf-core Hi-C pipeline which is a Nextflow implementation of the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/).
+## v1.2.0 - 2020-06-18
+
+### `Added`
+
+* Bump v1.2.0
+* Merge template nf-core 1.9
+* Move some options to snake_case
+* Update python scripts for python3
+* Update conda environment file
+  * python base `2.7.15` > `3.7.6`
+  * pip `19.1` > `20.0.1`
+  * scipy `1.2.1` > `1.4.1`
+  * numpy `1.16.3` > `1.18.1`
+  * bx-python `0.8.2` > `0.8.8`
+  * pysam `0.15.2` > `0.15.4`
+  * cooler `0.8.5` > `0.8.6`
+  * multiqc `1.7` > `1.8`
+  * iced `0.5.1` > `0.5.6`
+  * *_New_* pymdown-extensions `7.1`
+  * *_New_* hicexplorer `3.4.3`
+  * *_New_* bioconductor-hitc `1.32.0`
+  * *_New_* r-optparse `1.6.6`
+  * *_New_* ucsc-bedgraphtobigwig `377`
+  * *_New_* cython `0.29.19`
+  * *_New_* cooltools `0.3.2`
+  * *_New_* fanc `0.8.30`
+  * *_Removed_* r-markdown
+
+### `Fixed`
+
+* Fix error in doc for Arima kit usage
+* Sort the output of the `get_valid_interaction` process, as the input files of `remove_duplicates`
+are expected to be sorted (`sort -m`)
+
+### `Deprecated`
+
+* Command line options converted to `snake_case`:
+  * `--skipMaps` > `--skip_maps`
+  * `--skipIce` > `--skip_ice`
+  * `--skipCool` > `--skip_cool`
+  * `--skipMultiQC` > `--skip_multiqc`
+  * `--saveReference` > `--save_reference`
+  * `--saveAlignedIntermediates` > `--save_aligned_intermediates`
+  * `--saveInteractionBAM` > `--save_interaction_bam`
+
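+An illustrative migration, using flags from the list above and the test
+profile shown elsewhere in this repository:
+
+```bash
+# v1.1.x (deprecated):
+#   nextflow run nf-core/hic -profile test,docker --skipIce --saveReference
+# v1.2.0:
+nextflow run nf-core/hic -profile test,docker --skip_ice --save_reference
+```
+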
+## v1.1.1 - 2020-04-02
+
+### `Fixed`
+
+* Fix bug in tag. Remove '['
+
+## v1.1.0 - 2019-10-15
+
+### `Added`
+
+* Update hicpro2higlass with `-p` parameter
+* Support 'N' base motif in restriction/ligation sites
+* Support multiple restriction enzymes/ligation sites (comma separated) ([#31](https://github.com/nf-core/hic/issues/31))
+* Add `--saveInteractionBAM` option
+* Add DOI ([#29](https://github.com/nf-core/hic/issues/29))
+* Update manual ([#28](https://github.com/nf-core/hic/issues/28))
+
+### `Fixed`
+
+* Fix bug for reads extension `_1`/`_2` ([#30](https://github.com/nf-core/hic/issues/30))
+
+## v1.0 - 2019-05-06
+
+Initial release of nf-core/hic, created with the [nf-core](http://nf-co.re/) template.
+
+### `Added`
+
+First version of nf-core Hi-C pipeline which is a Nextflow implementation of
+the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/).
 Note that not all HiC-Pro functionalities are implemented yet.
-The current version supports most protocols including Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C or HiChip data.
+The current version supports most protocols including Hi-C, in situ Hi-C,
+DNase Hi-C, Micro-C, capture-C or HiChip data.
 
 In summary, this version allows:
 
-* Automatic detection and generation of annotation files based on igenomes if not provided.
+* Automatic detection and generation of annotation files based on igenomes
+if not provided.
 * Two-step alignment of raw sequencing reads
 * Reads filtering and detection of valid interaction products
 * Generation of raw contact matrices for a set of resolutions
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 09226d0d8d896bbc3bdb632476430d6cad4b0aa7..496ad3b59f0bc2e34e2a69f8d3b4cc760be51616 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -2,11 +2,17 @@
 
 ## Our Pledge
 
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project
+and our community a harassment-free experience for everyone, regardless of
+age, body size, disability, ethnicity, gender identity and expression, level
+of experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
 
 ## Our Standards
 
-Examples of behavior that contributes to creating a positive environment include:
+Examples of behavior that contributes to creating a positive environment
+include:
 
 * Using welcoming and inclusive language
 * Being respectful of differing viewpoints and experiences
@@ -16,31 +22,55 @@ Examples of behavior that contributes to creating a positive environment include
 
 Examples of unacceptable behavior by participants include:
 
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* The use of sexualized language or imagery and unwelcome sexual attention
+or advances
 * Trolling, insulting/derogatory comments, and personal or political attacks
 * Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
+* Publishing others' private information, such as a physical or electronic
+address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+professional setting
 
 ## Our Responsibilities
 
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
 
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
 
 ## Scope
 
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
 
 ## Enforcement
 
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-core-invite.herokuapp.com/). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team on
+[Slack](https://nf-co.re/join/slack). The project team will review
+and investigate all complaints, and will respond in a way that it deems
+appropriate to the circumstances. The project team is obligated to maintain
+confidentiality with regard to the reporter of an incident. Further details
+of specific enforcement policies may be posted separately.
 
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
 
 ## Attribution
 
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version]
 
 [homepage]: http://contributor-covenant.org
 [version]: http://contributor-covenant.org/version/1/4/
diff --git a/Dockerfile b/Dockerfile
index 06374cf95db6f1a3a68e8c45ab48d2d3ac1d2c2f..5aecaa6236984f7b4a2aa7b79422b454b6e74f9b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,14 @@
-FROM nfcore/base
+FROM nfcore/base:1.9
 LABEL authors="Nicolas Servant" \
-      description="Docker image containing all requirements for nf-core/hic pipeline"
+      description="Docker image containing all software requirements for the nf-core/hic pipeline"
 
 ## Install gcc for pip iced install
 RUN apt-get update && apt-get install -y gcc g++ && apt-get clean -y
 
 COPY environment.yml /
 RUN conda env create -f /environment.yml && conda clean -a
-ENV PATH /opt/conda/envs/nf-core-hic-1.0.0/bin:$PATH
+ENV PATH /opt/conda/envs/nf-core-hic-1.2.0/bin:$PATH
+
+# Dump the details of the installed packages to a file for posterity
+RUN conda env export --name nf-core-hic-1.2.0 > nf-core-hic-1.2.0.yml
+
diff --git a/README.md b/README.md
index 37692cf5d7e8901feeb77f29da73ad500ca46cc4..be3889dd90f80bc31850ce697468ffb6208be3d3 100644
--- a/README.md
+++ b/README.md
@@ -2,24 +2,36 @@
 
 **Analysis of Chromosome Conformation Capture data (Hi-C)**.
 
-[![Build Status](https://travis-ci.com/nf-core/hic.svg?branch=master)](https://travis-ci.com/nf-core/hic)
-[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.32.0-brightgreen.svg)](https://www.nextflow.io/)
+[![GitHub Actions CI Status](https://github.com/nf-core/hic/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/hic/actions)
+[![GitHub Actions Linting Status](https://github.com/nf-core/hic/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/hic/actions)
+[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/)
 
 [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](http://bioconda.github.io/)
 [![Docker](https://img.shields.io/docker/automated/nfcore/hic.svg)](https://hub.docker.com/r/nfcore/hic)
-![Singularity Container available](
-https://img.shields.io/badge/singularity-available-7E4C74.svg)
 
-### Introduction
-This pipeline is based on the [HiC-Pro workflow](https://github.com/nservant/HiC-Pro).
-It was designed to process Hi-C data from raw fastq files (paired-end Illumina data) to normalized contact maps.
-The current version supports most protocols, including digestion protocols as well as protocols that do not require restriction enzymes such as DNase Hi-C.
-In practice, this workflow was successfully applied to many data-sets including dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or HiChip data.
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2669513.svg)](https://doi.org/10.5281/zenodo.2669513)
 
-The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.
+## Introduction
 
-### Pipeline summary
-1. Mapping using a two steps strategy to rescue reads spanning the ligation sites (bowtie2)
+This pipeline is based on the
+[HiC-Pro workflow](https://github.com/nservant/HiC-Pro).
+It was designed to process Hi-C data from raw FastQ files (paired-end Illumina
+data) to normalized contact maps.
+The current version supports most protocols, including digestion protocols as
+well as protocols that do not require restriction enzymes such as DNase Hi-C.
+In practice, this workflow was successfully applied to many data-sets including
+dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or
+HiChip data.
+
+The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
+to run tasks across multiple compute infrastructures in a very portable manner.
+It comes with docker / singularity containers making installation trivial and
+results highly reproducible.
+
+## Pipeline summary
+
+1. Mapping using a two-step strategy to rescue reads spanning the ligation
+sites (bowtie2)
 2. Detection of valid interaction products
 3. Duplicates removal
 4. Create genome-wide contact maps at various resolutions
@@ -27,17 +39,77 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
 6. Quality controls and report (MultiQC)
 7. Additional export for visualisation and downstream analysis (cooler)
 
-### Documentation
-The nf-core/hic pipeline comes with documentation about the pipeline, found in the `docs/` directory:
+## Quick Start
+
+i. Install [`nextflow`](https://nf-co.re/usage/installation)
+
+ii. Install either [`Docker`](https://docs.docker.com/engine/installation/)
+or [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/)
+for full pipeline reproducibility (please only use [`Conda`](https://conda.io/miniconda.html)
+as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))
+
+iii. Download the pipeline and test it on a minimal dataset with a single command
+
+```bash
+nextflow run nf-core/hic -profile test,<docker/singularity/conda/institute>
+```
+
+> Please check [nf-core/configs](https://github.com/nf-core/configs#documentation)
+to see if a custom config file to run nf-core pipelines already exists for your Institute.
+If so, you can simply use `-profile <institute>` in your command.
+This will enable either `docker` or `singularity` and set the appropriate execution
+settings for your local compute environment.
+
+iv. Start running your own analysis!
+
+```bash
+nextflow run nf-core/hic -profile <docker/singularity/conda/institute> --reads '*_R{1,2}.fastq.gz' --genome GRCh37
+```
+
+See [usage docs](docs/usage.md) for all of the available options when running the pipeline.
 
-1. [Installation](docs/installation.md)
+## Documentation
+
+The nf-core/hic pipeline comes with documentation about the pipeline,
+found in the `docs/` directory:
+
+1. [Installation](https://nf-co.re/usage/installation)
 2. Pipeline configuration
-    * [Local installation](docs/configuration/local.md)
-    * [Adding your own system](docs/configuration/adding_your_own.md)
-    * [Reference genomes](docs/configuration/reference_genomes.md)  
+    * [Local installation](https://nf-co.re/usage/local_installation)
+    * [Adding your own system config](https://nf-co.re/usage/adding_own_config)
+    * [Reference genomes](https://nf-co.re/usage/reference_genomes)
 3. [Running the pipeline](docs/usage.md)
 4. [Output and how to interpret the results](docs/output.md)
-5. [Troubleshooting](docs/troubleshooting.md)
+5. [Troubleshooting](https://nf-co.re/usage/troubleshooting)
+
+For further information or help, don't hesitate to get in touch on
+[Slack](https://nfcore.slack.com/channels/hic).
+You can join with [this invite](https://nf-co.re/join/slack).
+
+## Credits
 
-### Credits
 nf-core/hic was originally written by Nicolas Servant.
+
+## Contributions and Support
+
+If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).
+
+For further information or help, don't hesitate to get in touch on
+[Slack](https://nfcore.slack.com/channels/hic) (you can join with
+[this invite](https://nf-co.re/join/slack)).
+
+## Citation
+
+If you use nf-core/hic for your analysis, please cite it using the following
+doi: [10.5281/zenodo.2669513](https://doi.org/10.5281/zenodo.2669513)
+
+You can cite the `nf-core` publication as follows:
+
+> **The nf-core framework for community-curated bioinformatics pipelines.**
+>
+> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg,
+Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.
+>
+> _Nat Biotechnol._ 2020 Feb 13.
+doi:[10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).  
+> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ)
diff --git a/assets/email_template.html b/assets/email_template.html
index bf19807e4a21c86f969a7ed5570b479574d9e4ee..177bccd2d802d3a11e2e6ca606e72ac19968126b 100644
--- a/assets/email_template.html
+++ b/assets/email_template.html
@@ -11,6 +11,8 @@
 <body>
 <div style="font-family: Helvetica, Arial, sans-serif; padding: 30px; max-width: 800px; margin: 0 auto;">
 
+<img src="cid:nfcorepipelinelogo">
+
 <h1>nf-core/hic v${version}</h1>
 <h2>Run Name: $runName</h2>
 
diff --git a/assets/email_template.txt b/assets/email_template.txt
index 6c85add607a47589da20df83c6892bcfe5e04f1d..a951c5e7f965fa5829707fc84f4351495995190f 100644
--- a/assets/email_template.txt
+++ b/assets/email_template.txt
@@ -1,6 +1,12 @@
-========================================
- nf-core/hic v${version}
-========================================
+----------------------------------------------------
+                                        ,--./,-.
+        ___     __   __   __   ___     /,-._.--~\\
+  |\\ | |__  __ /  ` /  \\ |__) |__         }  {
+  | \\| |       \\__, \\__/ |  \\ |___     \\`-._,-`-,
+                                        `._,._,'
+  nf-core/hic v${version}
+----------------------------------------------------
+
 Run Name: $runName
 
 <% if (success){
diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml
index d425b46caa3f6d032a2e5ed340788e583214d447..41468cab303a5894aa01e0823790b22cb44c95cd 100644
--- a/assets/multiqc_config.yaml
+++ b/assets/multiqc_config.yaml
@@ -3,7 +3,9 @@ report_comment: >
     analysis pipeline. For information about how to interpret these results, please see the
     <a href="https://github.com/nf-core/hic" target="_blank">documentation</a>.
 report_section_order:
-    nf-core/hic-software-versions:
+    software_versions:
         order: -1000
+    nf-core-hic-summary:
+        order: -1001
 
 export_plots: true
diff --git a/assets/nf-core-hic_logo.png b/assets/nf-core-hic_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..6b364161664e70224fac3a83fb9f02ed0acbd9f8
Binary files /dev/null and b/assets/nf-core-hic_logo.png differ
diff --git a/assets/nf-core-hic_social_preview.png b/assets/nf-core-hic_social_preview.png
new file mode 100644
index 0000000000000000000000000000000000000000..54784f0201bec3769e57e00a9a4f3c69c64dc055
Binary files /dev/null and b/assets/nf-core-hic_social_preview.png differ
diff --git a/assets/nf-core-hic_social_preview.svg b/assets/nf-core-hic_social_preview.svg
new file mode 100644
index 0000000000000000000000000000000000000000..bc2e2a33b8b1a8342e4a866e940ca35062e6a3b7
--- /dev/null
+++ b/assets/nf-core-hic_social_preview.svg
@@ -0,0 +1,448 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="1280"
+   height="640"
+   viewBox="0 0 338.66666 169.33333"
+   version="1.1"
+   id="svg8"
+   inkscape:version="0.92.3 (2405546, 2018-03-11)"
+   sodipodi:docname="social_preview_image_hic.svg"
+   inkscape:export-filename="social_preview_image.png"
+   inkscape:export-xdpi="96"
+   inkscape:export-ydpi="96">
+  <defs
+     id="defs2">
+    <clipPath
+       id="d">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9"
+         d="M 0,266 H 1022 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="c">
+      <path
+         inkscape:connector-curvature="0"
+         id="path12"
+         d="m 280.17,136.33 -21.5,-21.584 h 61 v 21.584 z" />
+    </clipPath>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(47.34875,36.9925,-36.9925,47.34875,344.325,162.1875)"
+       x2="1"
+       id="a">
+      <stop
+         id="stop15"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop17"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop19"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop21"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop23"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+    <clipPath
+       id="b">
+      <path
+         inkscape:connector-curvature="0"
+         id="path26"
+         d="M 0,266 H 1022 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath202"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path204"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath158"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path160"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath86"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path88"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath94"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path96"
+         d="M 804.509,211 H 968.795 V 114.019 H 804.509 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath110"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path112"
+         d="M 804.597,506 H 968.883 V 409.019 H 804.597 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath126"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path128"
+         d="M 133.598,209 H 297.883 V 112.019 H 133.598 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath142"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path144"
+         d="M 133.686,504 H 297.972 V 407.019 H 133.686 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath54"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path56-6"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath30"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path32"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath202-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path204-6"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath158-7"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path160-5"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath86-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path88-5"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath94-6"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path96-2"
+         d="M 804.509,211 H 968.795 V 114.019 H 804.509 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath110-9"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path112-1"
+         d="M 804.597,506 H 968.883 V 409.019 H 804.597 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath126-2"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path128-7"
+         d="M 133.598,209 H 297.883 V 112.019 H 133.598 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath142-0"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path144-9"
+         d="M 133.686,504 H 297.972 V 407.019 H 133.686 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath54-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path56-60"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath30-6"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path32-2"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(47.34875,36.9925,-36.9925,47.34875,344.325,162.1875)"
+       x2="1"
+       id="a-3">
+      <stop
+         id="stop15-61"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop17-29"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop19-3"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop21-19"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop23-4"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(14.322136,11.189559,-11.189559,14.322136,103.39117,-43.22521)"
+       x2="1"
+       id="f">
+      <stop
+         id="stop12"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop14"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop16"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop18"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop20"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.7"
+     inkscape:cx="94.827004"
+     inkscape:cy="267.59341"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1920"
+     inkscape:window-height="1012"
+     inkscape:window-x="1920"
+     inkscape:window-y="759"
+     inkscape:window-maximized="1"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     units="px" />
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(31.749994,-15.785728)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot308"
+       style="font-style:normal;font-weight:normal;font-size:37.33333206px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       transform="matrix(0.26458333,0,0,0.26458333,-34.517006,20.683034)"><flowRegion
+         id="flowRegion310"
+         style="font-size:37.33333206px;text-align:center;text-anchor:middle"><rect
+           id="rect312"
+           width="1031.3657"
+           height="101.01524"
+           x="135.36044"
+           y="417.76645"
+           style="font-size:37.33333206px;text-align:center;text-anchor:middle" /></flowRegion><flowPara
+         style="font-size:32px;text-align:center;text-anchor:middle"
+         id="flowPara902">Analysis of Chromosome Conformation Capture data (Hi-C)</flowPara></flowRoot>    <g
+       id="g603"
+       transform="matrix(0.44611981,0,0,0.44611981,44.334855,81.689003)">
+      <flowRoot
+         xml:space="preserve"
+         id="flowRoot1021"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:40px;line-height:1.25;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+         transform="matrix(1.9231376,0,0,1.9231376,-514.12361,-525.99533)"><flowRegion
+           id="flowRegion1023"
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'"><rect
+             id="rect1025"
+             width="275.99985"
+             height="102.85306"
+             x="274.76151"
+             y="267.25372"
+             style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'" /></flowRegion><flowPara
+           id="flowPara1027"
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'">hic</flowPara><flowPara
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'"
+           id="flowPara982" /></flowRoot>    </g>
+    <g
+       id="g551"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         style="fill:#24af63;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path24"
+         d="m 401.03289,-44.148517 v 1.35913 c -0.0378,0 -0.0378,0 -0.0755,0.03775 l -0.67956,-0.566304 c -1.51015,-1.283623 -3.13355,-2.416231 -4.94572,-3.322317 -0.30203,-0.151014 -0.60406,-0.339782 -0.94384,-0.415289 -0.0378,-0.03775 -0.0755,-0.07551 -0.11326,-0.07551 -0.67957,-0.264275 -1.35913,-0.490796 -2.07645,-0.679564 -1.5479,-0.339783 -3.0958,-0.377536 -4.64369,-0.07551 -2.00094,0.41529 -3.77536,1.283623 -5.47428,2.416231 -1.66115,1.132607 -3.1713,2.453983 -4.56818,3.850866 -0.26428,0.264276 -0.26428,0.264276 -0.41529,-0.07551 -0.75507,-1.547897 -1.58565,-3.058041 -2.605,-4.454924 -0.79282,-1.057101 -1.66116,-2.038694 -2.79376,-2.718259 -1.13261,-0.717318 -2.37848,-0.981594 -3.69986,-0.641811 -1.6234,0.377536 -2.94478,1.359129 -4.19065,2.41623 -0.75507,0.566304 -1.43463,1.245869 -2.1142,1.88768 -0.64181,0.566304 -1.24587,1.132608 -1.88768,1.698912 -0.11326,0.11326 -0.18877,0.11326 -0.30203,0 -0.64181,-0.679565 -1.32137,-1.283623 -2.1142,-1.698912 -1.17036,-0.641811 -2.37847,-0.717319 -3.62434,-0.302029 -1.0571,0.339782 -1.96319,0.906086 -2.90703,1.510144 -0.37754,0.226521 -0.71732,0.490797 -1.09485,0.679565 v -0.07551 c 0.0378,-0.07551 0.0378,-0.151014 0.0378,-0.226521 0.0755,-1.661158 0.18877,-3.36007 0.45304,-5.021228 0.37754,-2.30297 0.94384,-4.530432 1.96319,-6.60688 0.75507,-1.547897 1.77442,-2.982534 3.05804,-4.152895 1.69891,-1.547898 3.69985,-2.529491 5.88956,-3.133549 2.37848,-0.679565 4.79471,-0.981593 7.24869,-1.094854 0.9816,-0.03775 1.92544,-0.07551 2.90703,-0.113261 0.4908,0.188768 0.98159,0.302029 1.47239,0.453043 0.71732,0.226522 1.43464,0.453043 2.15195,0.641811 0.37754,0.151015 0.71732,0.264276 1.09486,0.41529 1.39688,0.490797 2.75601,1.132608 3.92637,2.076448 0.30203,0.226521 0.60406,0.490796 0.90609,0.755072 -0.0755,-0.226522 -0.15102,-0.41529 -0.22652,-0.641811 -0.52855,-1.396884 -1.32138,-2.529491 -2.605,-3.322317 -0.52855,-0.339782 -1.09485,-0.566304 -1.66116,-0.868333 0.0378,0 0.11326,0 0.15102,-0.03775 1.69891,-0.302029 3.43557,-0.453043 5.17224,-0.528551 1.47239,-0.07551 2.98253,-0.03775 4.45492,0.113261 1.73667,0.188768 3.43558,0.528551 5.05898,1.132608 2.41623,0.906086 4.49268,2.302969 6.11609,4.341664 1.39688,1.774419 2.30297,3.813113 2.86927,6.002821 0.5663,2.151955 0.79283,4.379418 0.86833,6.60688 -0.0378,0.792825 -0.0378,1.623404 -0.0378,2.453983 z"
+         class="st0" />
+      <path
+         style="fill:#ecdc86;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path26-8"
+         d="m 401.03289,-46.640254 h 0.0378 v 2.491737 h -0.0378 z"
+         class="st4" />
+      <path
+         style="fill:#a0918f;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path28"
+         d="m 387.25283,-82.468416 v 0.03775 h -1.69891 v -0.03775 z"
+         class="st5" />
+      <path
+         style="fill:#24af63;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path30"
+         d="m 388.34768,-11.340642 c 0.94384,-0.377536 1.84993,-0.868332 2.75601,-1.359129 1.0571,-0.641811 2.0387,-1.321376 3.02029,-2.038694 0.83058,-0.604058 1.62341,-1.245869 2.41623,-1.925434 l 1.35913,-1.132608 c 0.0755,-0.07551 0.0755,-0.03775 0.11326,0.03775 0.15102,0.604058 0.26428,1.245869 0.41529,1.88768 0.18877,0.868333 0.30203,1.736665 0.41529,2.604998 0.11326,1.019347 0.15102,2.000941 0.0755,3.020288 -0.0755,1.2458683 -0.30203,2.491737 -0.75507,3.6620985 -0.26428,0.7173183 -0.60406,1.396883 -1.01935,2.0386942 -0.52855,0.8305791 -1.13261,1.585651 -1.88768,2.2274621 -0.86833,0.7928255 -1.84993,1.47239026 -2.90703,2.0386942 -1.39688,0.79282552 -2.90702,1.3591295 -4.45492,1.8121726 -1.47239,0.4530432 -2.98254,0.7550719 -4.49268,1.0193471 -0.41529,0.075507 -0.83058,0.1510144 -1.24587,0.1510144 -0.86833,-0.037754 -1.73666,-0.2265216 -2.56724,-0.5285504 -1.24587,-0.4907967 -2.34073,-1.24586863 -3.58659,-1.81217257 -0.67957,-0.30202876 -1.35913,-0.52855034 -2.07645,-0.56630394 -0.94384,-0.0755072 -1.73667,0.26427518 -2.41623,0.90608631 l -1.47239,1.4723902 c -0.83058,0.8305792 -1.81218,1.3968831 -2.98254,1.5856511 -0.79282,0.1132608 -1.54789,0.075507 -2.34072,-0.037754 -1.09485,-0.1510144 -2.15195,-0.4152896 -3.1713,-0.7173183 -1.51015,-0.4907968 -2.90703,-1.1703615 -4.19065,-2.15195502 -1.20812,-0.9060863 -2.15196,-2.03869418 -2.94478,-3.32231648 -0.79283,-1.3213758 -1.24587,-2.7182589 -1.58565,-4.1906491 -0.15102,-0.6418112 -0.22652,-1.3213759 -0.30203,-1.963187 -0.11326,-0.9438399 -0.15102,-1.8499259 -0.11326,-2.7937659 0.0378,-1.321376 0.15101,-2.604998 0.33978,-3.926374 0.15101,0.151015 0.30203,0.264275 0.41529,0.377536 0.83058,0.755072 1.77442,1.434637 2.75601,2.038694 1.09486,0.641811 2.30297,1.170362 3.54884,1.510144 0.83058,0.226522 1.69891,0.377536 2.56724,0.41529 0.71732,0.07551 1.43464,0.07551 2.15196,0.03775 0.83058,-0.03775 1.69891,-0.11326 2.52949,-0.339782 0.11326,0 0.22652,0 0.30203,-0.03775 0.75507,-0.151014 1.51014,-0.339782 2.26522,-0.566304 0.86833,-0.264275 1.69891,-0.52855 2.52949,-0.830579 0.79282,-0.302028 1.6234,-0.641811 2.41623,-0.981593 0.22652,-0.113261 0.37753,-0.07551 0.5663,0.07551 1.32138,1.019347 2.75601,1.88768 4.30391,2.529491 1.73667,0.679565 3.51108,1.019347 5.36101,0.868333 1.35913,-0.264276 2.64275,-0.604058 3.88862,-1.094855 z"
+         class="st0" />
+      <path
+         style="fill:#ecdc86;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path32-3"
+         d="m 388.34768,-11.340642 c -1.24587,0.490797 -2.52949,0.830579 -3.85087,0.94384 -1.84992,0.151015 -3.62434,-0.188768 -5.36101,-0.868332 -1.54789,-0.604058 -2.98253,-1.472391 -4.30391,-2.529491 -0.18876,-0.151015 -0.33978,-0.188768 -0.5663,-0.07551 -0.79283,0.339783 -1.58565,0.679565 -2.41623,0.981594 -0.83058,0.302029 -1.66116,0.604057 -2.52949,0.830579 -0.75507,0.226522 -1.51014,0.377536 -2.26522,0.566304 -0.11326,0.03775 -0.22652,0.03775 -0.30203,0.03775 0.26428,-0.302029 0.52855,-0.604058 0.79283,-0.906087 1.0571,-1.208115 1.81217,-2.604998 2.22746,-4.190649 0.60406,-2.114201 1.20812,-4.266156 1.73667,-6.418111 0.37753,-1.585651 0.67956,-3.171302 0.90608,-4.794707 0.15102,-1.170361 0.37754,-5.625286 0.30203,-6.682386 -0.18877,-3.24681 -0.90608,-6.342605 -2.22746,-9.325139 -0.79283,-1.774419 -2.15195,-2.982534 -4.03963,-3.47333 -0.83058,-0.226522 -1.66116,-0.151015 -2.45399,0.11326 -0.0755,0.03775 -0.11326,0.07551 -0.18877,0.03775 1.24587,-1.057101 2.52949,-2.038694 4.19065,-2.41623 1.32138,-0.302029 2.56725,-0.07551 3.69986,0.641811 1.1326,0.717318 2.00094,1.661158 2.79376,2.718259 1.01935,1.396883 1.84993,2.907027 2.605,4.454924 0.15101,0.339783 0.15101,0.339783 0.41529,0.07551 1.39688,-1.434636 2.90703,-2.756012 4.56818,-3.850866 1.66116,-1.132608 3.47334,-2.000941 5.47428,-2.416231 1.54789,-0.302028 3.09579,-0.264275 4.64369,0.07551 0.71732,0.151014 1.39688,0.377536 2.07645,0.679564 0.0378,0.03775 0.11326,0.03775 0.11326,0.07551 -2.00094,0.03775 -3.69986,0.792825 -5.24775,1.963187 -0.75508,0.566303 -1.43464,1.208115 -1.96319,2.00094 -0.41529,0.641811 -0.79283,1.35913 -1.09485,2.076448 -0.67957,1.434637 -1.24587,2.94478 -1.66116,4.492678 -0.33978,1.321376 -0.56631,2.680505 -0.67957,4.039635 -0.0755,1.0571 -0.11326,2.114201 -0.0755,3.171302 0.0377,1.283622 0.18876,2.567244 0.33978,3.88862 0.26427,2.151955 0.64181,4.30391 1.01934,6.455865 0.18877,1.170361 0.33979,2.378477 0.56631,3.586592 0.26427,1.736665 1.24587,3.020287 2.64275,4.001881 0,-0.03775 0.0755,0 0.11326,0.03775 z"
+         class="st4" />
+      <path
+         style="fill:#3f2b29;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path34-7"
+         d="m 385.55392,-82.430663 h 1.69891 c 1.35913,0.07551 2.71826,0.302029 4.00188,0.755072 1.01935,0.339782 1.24587,1.019347 0.64181,1.88768 -0.41529,0.604057 -1.01935,1.057101 -1.66116,1.47239 -0.79282,0.52855 -1.66116,0.981594 -2.605,1.321376 -0.94384,0.377536 -1.84992,0 -2.45398,-0.94384 -0.18877,-0.302029 -0.33978,-0.604058 -0.41529,-0.94384 -0.0378,-0.113261 -0.0755,-0.151014 -0.18877,-0.151014 -2.1142,-0.377536 -4.00188,0.113261 -5.54978,1.623404 -1.28362,1.208115 -2.03869,2.756013 -2.56724,4.417171 -0.4908,1.510144 -0.71732,3.020288 -0.79283,4.605939 -0.0755,1.396883 0.0378,2.793766 0.22652,4.152895 0.0378,0.226522 0.11327,0.453044 0.11327,0.717319 0.0378,0.302028 -0.0755,0.566304 -0.30203,0.717318 -0.26428,0.188768 -0.56631,0.151014 -0.86834,0.151014 -0.71731,-0.226521 -1.43463,-0.453043 -2.15195,-0.641811 v -0.490796 c 0,-0.755072 0,-1.472391 0.0378,-2.227463 0.15102,-2.907027 0.60406,-5.7763 1.73667,-8.494559 0.83058,-2.038694 2.03869,-3.813113 3.7376,-5.209996 1.39689,-1.170361 2.98254,-1.925433 4.75696,-2.340723 0.90608,-0.226522 1.73666,-0.339782 2.605,-0.377536 z"
+         class="st6" />
+      <path
+         style="fill:#396e35;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path36"
+         d="m 374.86965,-62.685532 c 0.30203,0 0.60406,0.03775 0.86833,-0.151015 0.26428,-0.188768 0.33978,-0.453043 0.30203,-0.717318 -0.0378,-0.226521 -0.0755,-0.490797 -0.11326,-0.717318 0.15101,0 0.26427,-0.03775 0.41529,-0.03775 0.52855,0.302029 1.09485,0.566304 1.66116,0.868333 1.28362,0.792825 2.07644,1.925433 2.60499,3.322316 0.0755,0.226522 0.15102,0.41529 0.22653,0.641811 -0.30203,-0.264275 -0.60406,-0.52855 -0.90609,-0.755071 -1.20812,-0.906087 -2.52949,-1.547898 -3.92637,-2.076448 -0.41529,-0.113261 -0.75508,-0.226522 -1.13261,-0.377536 z"
+         class="st7" />
+      <path
+         style="fill:#396e35;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path38-9"
+         d="m 372.71769,-63.81814 v 0.490797 c -0.49079,-0.151015 -0.98159,-0.264275 -1.47239,-0.453043 0.52855,-0.03775 1.01935,-0.03775 1.47239,-0.03775 z"
+         class="st7" />
+    </g>
+    <g
+       id="g596"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         d="m 150.58729,-13.861192 q -5.8632,0 -10.61714,-2.29774 -4.75394,-2.29774 -7.60631,-6.89322 -2.77314,-4.674713 -2.77314,-11.330235 0,-10.696376 5.70474,-16.163413 5.70473,-5.546269 15.21262,-5.546269 3.32776,0 6.73476,0.713092 3.40699,0.633859 6.02166,1.822345 v 10.141749 q -3.24853,-1.426183 -5.78397,-2.139275 -2.53543,-0.792324 -5.07087,-0.792324 -5.22934,0 -8.16094,2.61467 -2.9316,2.535437 -2.9316,8.002474 0,6.100896 2.6939,9.032495 2.77314,2.931599 8.95327,2.931599 4.67471,0 10.37944,-3.169296 v 10.062516 q -2.85236,1.505416 -5.94243,2.218508 -3.01083,0.792324 -6.81399,0.792324 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path569"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 189.64516,-13.702727 q -10.14175,0 -15.21262,-5.387804 -5.07088,-5.387805 -5.07088,-16.004948 0,-11.092538 4.99164,-16.163413 5.07088,-5.070874 15.29186,-5.070874 10.22098,0 15.37109,5.229339 5.15011,5.150107 5.15011,16.004948 0,10.458679 -5.30858,15.925715 -5.30857,5.467037 -15.21262,5.467037 z m 0,-9.428657 q 4.35778,0 6.57629,-3.010832 2.21851,-3.090064 2.21851,-8.953263 0,-6.417826 -2.21851,-9.111728 -2.13927,-2.773134 -6.57629,-2.773134 -4.59548,0 -6.65552,2.773134 -1.98081,2.693902 -1.98081,9.111728 0,6.100896 2.06004,9.032495 2.13927,2.9316 6.57629,2.9316 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path571"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 218.31492,-50.783497 q 1.18848,-1.030021 4.12008,-2.29774 3.01084,-1.267719 6.65553,-2.139275 3.72392,-0.950789 7.13091,-0.950789 6.57629,0 9.11173,1.98081 v 8.636333 q -3.1693,-0.713091 -9.11173,-0.713091 -3.72392,0 -6.10089,0.396162 v 30.979874 h -11.80563 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path573"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 268.56064,-56.329766 q 18.77809,0 18.54039,24.482816 H 260.6374 q 1.50542,8.319403 11.17177,8.319403 2.9316,0 5.70474,-0.792324 2.85236,-0.871556 6.02166,-2.218507 v 10.141749 q -6.49706,2.693902 -14.57876,2.693902 -6.1009,0 -10.69638,-2.456205 -4.59548,-2.535437 -7.13092,-7.21015 -2.53543,-4.674712 -2.53543,-11.013305 0,-10.458679 5.1501,-16.163413 5.22934,-5.783966 14.81646,-5.783966 z m -0.47539,9.032495 q -6.33859,0 -7.52708,7.923242 h 14.81646 q -0.55462,-4.278551 -2.29774,-6.100896 -1.66388,-1.822346 -4.99164,-1.822346 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path575"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 318.12052,-74.790919 h 8.47787 l -29.15753,67.6644822 h -8.39863 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path577"
+         inkscape:connector-curvature="0" />
+    </g>
+    <g
+       id="g589"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         d="m 15.436598,-51.575821 q 3.090064,-1.663881 9.428657,-3.090064 6.338593,-1.426184 11.330235,-1.426184 8.477868,0 12.756419,3.327762 4.27855,3.327761 4.27855,10.934073 v 26.939021 h -11.80563 v -24.562049 q 0,-6.89322 -7.764776,-6.89322 -1.743113,0 -3.565459,0.396162 -1.822345,0.396162 -2.852367,0.950789 v 30.108318 H 15.436598 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path559"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 66.729683,-45.95032 h -5.863199 v -9.032495 h 5.942431 v -0.950789 q 0.07923,-9.032495 4.120086,-12.518721 4.120085,-3.565459 11.17177,-3.565459 1.267719,0 2.773134,0.237697 1.505416,0.237697 2.456205,0.554627 v 10.379446 q -0.871556,-0.871556 -1.743113,-1.267718 -0.871556,-0.475395 -2.218507,-0.475395 -2.218508,0 -3.486227,1.188486 -1.267718,1.188487 -1.267718,4.040854 v 2.376972 h 8.715565 v 9.032495 h -8.794798 v 31.059107 H 66.729683 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path561"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 96.008535,-43.256418 h 24.165885 v 8.160939 H 96.008535 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path563"
+         inkscape:connector-curvature="0" />
+      <path
+         style="display:inline;fill:url(#f);stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path67"
+         d="m 105.17203,-43.255454 -8.129199,8.160957 h 23.064239 v -8.160957 z" />
+    </g>
+  </g>
+</svg>
diff --git a/assets/sendmail_template.txt b/assets/sendmail_template.txt
index 2d6712200607cb62f31be950cfe4c54e5ca1838a..9afc48067351f78cb54af40a3d76180866edf729 100644
--- a/assets/sendmail_template.txt
+++ b/assets/sendmail_template.txt
@@ -8,6 +8,23 @@ Content-Type: text/html; charset=utf-8
 
 $email_html
 
+--nfcoremimeboundary
+Content-Type: image/png;name="nf-core-hic_logo.png"
+Content-Transfer-Encoding: base64
+Content-ID: <nfcorepipelinelogo>
+Content-Disposition: inline; filename="nf-core-hic_logo.png"
+
+<% out << new File("$baseDir/assets/nf-core-hic_logo.png").
+  bytes.
+  encodeBase64().
+  toString().
+  tokenize( '\n' )*.
+  toList()*.
+  collate( 76 )*.
+  collect { it.join() }.
+  flatten().
+  join( '\n' ) %>
+
 <%
 if (mqcFile){
 def mqcFileObj = new File("$mqcFile")
diff --git a/bin/__pycache__/scrape_software_versions.cpython-36.pyc b/bin/__pycache__/scrape_software_versions.cpython-36.pyc
deleted file mode 100644
index 07062ad2b5afae9e2bc03e055e9776da9b619a1c..0000000000000000000000000000000000000000
Binary files a/bin/__pycache__/scrape_software_versions.cpython-36.pyc and /dev/null differ
diff --git a/bin/digest_genome.py b/bin/digest_genome.py
index db2d151602269c22f3b0a837446bba30a4b442b6..2c29a49e1cf174f12142f78627fd799b83da2788 100755
--- a/bin/digest_genome.py
+++ b/bin/digest_genome.py
@@ -26,47 +26,48 @@ RE_cutsite = {
 
 
 def find_re_sites(filename, sequences, offset):
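+    """Scan the FASTA file for each restriction-site sequence and return (contig_names, all_indices)."""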
-    infile = open(filename)
-    chr_id = None
-    big_str = ""
-    indices = []
-    all_indices = []
-    contig_names = []
-    c = 0
-    for line in infile:
-        c += 1
-        if line.startswith(">"):
-            print line.split()[0][1:], "..."
-            # If this is not the first chromosome, find the indices and append
-            # them to the list
-            if chr_id is not None:
-                for rs in range(len(sequences)):
-                    pattern = "(?=%s)" % sequences[rs].lower()
-                    indices += [m.start() + offset[rs]
-                                for m in re.finditer(pattern, big_str)]
-                indices.sort()
-                all_indices.append(indices)
-                indices = []
-            # This is a new chromosome. Empty the sequence string, and add the
-            # correct chrom id
-            big_str = ""
-            chr_id = line.split()[0][1:]
-            if chr_id in contig_names:
-                print "The fasta file contains several instance of",
-                print chr_id, ". Exit."
-                sys.exit(-1)
-            contig_names.append(chr_id)
-        else:
-            # As long as we don't change chromosomes, continue reading the
-            # file, and appending the sequences
-            big_str += line.lower().strip()
-    # Add the indices for the last chromosome
-    for rs in range(len(sequences)):
-        pattern = "(?=%s)" % sequences[rs].lower()
-        indices += [m.start() + offset[rs]
-                    for m in re.finditer(pattern, big_str)]
-    indices.sort()
-    all_indices.append(indices)
+    with open(filename, 'r') as infile:
+        chr_id = None
+        big_str = ""
+        indices = []
+        all_indices = []
+        contig_names = []
+        c = 0
+        for line in infile:
+            c += 1
+            if line.startswith(">"):
+                print("{}...".format(line.split()[0][1:]))
+                # If this is not the first chromosome, find the indices and append
+                # them to the list
+                if chr_id is not None:
+                    for rs in range(len(sequences)):
+                        pattern = "(?={})".format(sequences[rs].lower())
+                        indices += [m.start() + offset[rs]
+                                    for m in re.finditer(pattern, big_str)]
+                    indices.sort()
+                    all_indices.append(indices)
+                    indices = []
+
+                # This is a new chromosome. Empty the sequence string, and add the
+                # correct chrom id
+                big_str = ""
+                chr_id = line.split()[0][1:]
+                if chr_id in contig_names:
+                    print("The fasta file contains several instances of {}. Exit.".format(chr_id))
+                    sys.exit(-1)
+                contig_names.append(chr_id)
+            else:
+                # As long as we don't change chromosomes, continue reading the
+                # file, and appending the sequences
+                big_str += line.lower().strip()
+        # Add the indices for the last chromosome
+        for rs in range(len(sequences)):
+            pattern = "(?={})".format(sequences[rs].lower())
+            indices += [m.start() + offset[rs]
+                        for m in re.finditer(pattern, big_str)]
+        indices.sort()
+        all_indices.append(indices)
+
     return contig_names, all_indices
 
 
@@ -74,19 +75,35 @@ def find_chromsomose_lengths(reference_filename):
     chromosome_lengths = []
     chromosome_names = []
     length = None
-    infile = open(reference_filename)
-    for line in infile:
-        if line.startswith(">"):
-            chromosome_names.append(line[1:].strip())
-            if length is not None:
-                chromosome_lengths.append(length)
-            length = 0
-        else:
-            length += len(line.strip())
-    chromosome_lengths.append(length)
+    with open(reference_filename, 'r') as infile:
+        for line in infile:
+            if line.startswith(">"):
+                chromosome_names.append(line[1:].strip())
+                if length is not None:
+                    chromosome_lengths.append(length)
+                length = 0
+            else:
+                length += len(line.strip())
+        chromosome_lengths.append(length)
     return chromosome_names, np.array(chromosome_lengths)
 
 
+def replaceN(cs):
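+    """Recursively expand each degenerate 'N' base in a restriction motif into A, C, G and T, returning the list of explicit motifs."""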
+    npos = int(cs.find('N'))
+    cseql = []
+    if npos != -1:
+        for nuc in ["A","C","G","T"]:
+            tmp = cs.replace('N', nuc, 1)
+            tmpl = replaceN(tmp)
+            if type(tmpl) == list:
+                cseql = cseql + tmpl
+            else:
+                cseql.append(tmpl)
+    else:
+        cseql.append(cs)
+    return cseql
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument('fastafile')
@@ -102,8 +119,13 @@ if __name__ == "__main__":
 
     filename = args.fastafile
     out = args.out
-    cutsites = args.res_sites
-
+
+    # Split restriction sites if comma-separated
+    cutsites = []
+    for s in args.res_sites:
+        for m in s.split(','):
+            cutsites.append(m)
+
     # process args and get restriction enzyme sequences
     sequences = []
     offset = []
@@ -112,25 +134,43 @@ if __name__ == "__main__":
             cseq = ''.join(RE_cutsite[cs.lower()])
         else:
             cseq = cs
+
         offpos = int(cseq.find('^'))
         if offpos == -1:
-            print "Unable to detect offset for", cseq
-            print "Please, use '^' to specified the cutting position,",
-            print "i.e A^GATCT for HindIII digestion"
+            print("Unable to detect offset for {}. Please use '^' to specify the cutting position, "
+                  "i.e. A^GATCT for HindIII digestion.".format(cseq))
             sys.exit(-1)
+
+        for nuc in list(set(cs)):
+            if nuc not in ['A','T','G','C','N','^']:
+                print("Found unexpected character '{}' in restriction motif".format(nuc))
+                print("Note that multiple motifs should be separated by a space (not a comma!)")
+
+                sys.exit(-1)
+
         offset.append(offpos)
         sequences.append(re.sub('\^', '', cseq))
 
+    # replace all N in restriction motif
+    sequences_without_N = []
+    offset_without_N = []
+    for rs in range(len(sequences)):
+        nrs = replaceN(sequences[rs])
+        sequences_without_N = sequences_without_N + nrs
+        offset_without_N = offset_without_N + [offset[rs]] * len(nrs)
+
+    sequences = sequences_without_N
+    offset = offset_without_N
+
     if out is None:
         out = os.path.splitext(filename)[0] + "_fragments.bed"
 
-    print "Analyzing", filename
-    print "Restriction site(s)", ",".join(sequences)
-    print "Offset(s)",  ','.join(str(x) for x in offset)
+    print("Analyzing", filename)
+    print("Restriction site(s)", ",".join(sequences))
+    print("Offset(s)", ','.join(str(x) for x in offset))
 
     # Read fasta file and look for rs per chromosome
-    contig_names, all_indices = find_re_sites(filename, sequences,
-                                              offset=offset)
+    contig_names, all_indices = find_re_sites(filename, sequences, offset=offset)
     _, lengths = find_chromsomose_lengths(filename)
 
     valid_fragments = []
@@ -142,17 +182,14 @@ if __name__ == "__main__":
         valid_fragments.append(valid_fragments_chr)
 
     # Write results
-    print "Writing to", out, "..."
-    outfile = open(out, "w")
-    for chrom_name, indices in zip(contig_names, valid_fragments):
-        frag_id = 0
-        for begin, end in indices:
-            # allow to remove cases where the enzyme cut at
-            # the first position of the chromosome
-            if end > begin:
-                frag_id += 1
-                frag_name = "HIC_%s_%d" % (chrom_name, frag_id)
-                outfile.write(
-                    "%s\t%d\t%d\t%s\t0\t+\n" % (chrom_name, begin,
-                                                end, frag_name))
-    outfile.close()
+    print("Writing to {} ...".format(out))
+    with open(out, 'w') as outfile:
+        for chrom_name, indices in zip(contig_names, valid_fragments):
+            frag_id = 0
+            for begin, end in indices:
+                # allow to remove cases where the enzyme cut at
+                # the first position of the chromosome
+                if end > begin:
+                    frag_id += 1
+                    frag_name = "HIC_{}_{}".format(str(chrom_name), int(frag_id))
+                    outfile.write("{}\t{}\t{}\t{}\t0\t+\n".format(str(chrom_name), int(begin), int(end), str(frag_name)))
diff --git a/bin/hicpro2higlass.sh b/bin/hicpro2higlass.sh
index 5cc09b773e9dc016fd0bf1d95a023e3b4bb29054..ff11aeeb538bbfb06acead2d22646c93d8a567bf 100755
--- a/bin/hicpro2higlass.sh
+++ b/bin/hicpro2higlass.sh
@@ -103,6 +103,7 @@ function help {
     echo "   -i|--input INPUT : allValidPairs or matrix file generated by HiC-Pro"
     echo "   -r|--res RESOLUTION : .matrix file resolution or maximum resolution to reach from the .allValidPairs input file"
     echo "   -c|--chrom CHROMSIZE : chromosome size file"
+    echo "   -p|--proc NB_CPU : number of CPUs for cooler"
     echo "   [-n|--norm] : run cooler matrix balancing algorithm"
     echo "   [-o|--out] : output path. Default is current path"
     echo "   [-t|--temp] TEMP : path to tmp folder. Default is current path"
@@ -125,6 +126,7 @@ for arg in "$@"; do
       "--bed")   set -- "$@" "-b" ;;
       "--res")   set -- "$@" "-r" ;;
       "--chrom") set -- "$@" "-c" ;;
+      "--proc") set -- "$@" "-p" ;;
       "--out") set -- "$@" "-o" ;;
       "--temp") set -- "$@" "-t" ;;
       "--norm")   set -- "$@" "-n" ;;
@@ -136,18 +138,20 @@ done
 INPUT_HICPRO=""
 INPUT_BED=""
 NORMALIZE=0
+NPROC=1
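+# Number of CPUs passed to the cooler commands (-p/--proc)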
 CHROMSIZES_FILE=""
 RES=10000
 OUT="./"
 TEMP="./"
 
-while getopts ":i:b:c:r:o:t:nh" OPT
+while getopts ":i:b:c:p:r:o:t:nh" OPT
 do
     case $OPT in
 	i) INPUT_HICPRO=$OPTARG;;
 	b) INPUT_BED=$OPTARG;;
 	n) NORMALIZE=1;;
 	c) CHROMSIZES_FILE=$OPTARG;;
+	p) NPROC=$OPTARG;;
 	r) RES=$OPTARG;;
 	o) OUT=$OPTARG;;
 	t) TEMP=$OPTARG;;
@@ -223,9 +227,9 @@ if [[ $DATATYPE == "MATRIX" ]]; then
 
     echo -e "\nZoomify .cool file ..."
     if [[ $NORMALIZE == 1 ]]; then
-	cooler zoomify --balance $tmp_dir/$out
+	cooler zoomify --nproc ${NPROC} --balance $tmp_dir/$out
     else
-	cooler zoomify --no-balance $tmp_dir/$out
+	cooler zoomify --nproc ${NPROC} $tmp_dir/$out
     fi
     out=$(basename $INPUT_HICPRO | sed -e 's/.mat.*/.mcool/')
     
@@ -233,19 +237,19 @@ elif [[ $DATATYPE == "VALID" ]]; then
     out=$(basename $INPUT_HICPRO | sed -e 's/.allValidPairs.*/.cool/')
 
     awk '{OFS="\t";print $2,$3,$4,$5,$6,$7,1}' $INPUT_HICPRO | sed -e 's/+/1/g' -e 's/-/16/g' > $tmp_dir/contacts.txt
-    cooler csort --nproc 2 -c1 1 -p1 2 -s1 3 -c2 4 -p2 5 -s2 6 \
+    cooler csort --nproc ${NPROC} -c1 1 -p1 2 -s1 3 -c2 4 -p2 5 -s2 6 \
 	   -o $tmp_dir/contacts.sorted.txt.gz  \
 	   $tmp_dir/contacts.txt \
 	   $CHROMSIZES_FILE
     
     cooler makebins $CHROMSIZES_FILE $RES > $tmp_dir/bins.bed
-    cooler cload pairix $tmp_dir/bins.bed $tmp_dir/contacts.sorted.txt.gz $tmp_dir/$out
+    cooler cload pairix --nproc ${NPROC} $tmp_dir/bins.bed $tmp_dir/contacts.sorted.txt.gz $tmp_dir/$out
 
     echo -e "\nZoomify .cool file ..."
     if [[ $NORMALIZE == 1 ]]; then
-	cooler zoomify --balance $tmp_dir/$out
+	cooler zoomify --nproc ${NPROC} --balance $tmp_dir/$out
     else
-	cooler zoomify --no-balance $tmp_dir/$out
+	cooler zoomify --nproc ${NPROC} $tmp_dir/$out
     fi
     out=$(basename $INPUT_HICPRO | sed -e 's/.allValidPairs.*/.mcool/')
 fi
diff --git a/bin/ice b/bin/ice
deleted file mode 100755
index 10f5f224a6064961a04ac2c09bc5b29286bf5484..0000000000000000000000000000000000000000
--- a/bin/ice
+++ /dev/null
@@ -1,124 +0,0 @@
-#! /usr/bin/env python
-
-import sys
-import argparse
-import numpy as np
-from scipy import sparse
-
-import iced
-from iced.io import loadtxt, savetxt
-
-
-parser = argparse.ArgumentParser("ICE normalization")
-parser.add_argument('filename',
-                    metavar='File to load',
-                    type=str,
-                    help='Path to file of contact counts to load')
-parser.add_argument("--results_filename",
-                    "-r",
-                    type=str,
-                    default=None,
-                    help="results_filename")
-parser.add_argument("--filtering_perc", "-f",
-                    type=float,
-                    default=None,
-                    help="Percentage of reads to filter out")
-parser.add_argument("--filter_low_counts_perc",
-                    type=float,
-                    default=0.02,
-                    help="Percentage of reads to filter out")
-parser.add_argument("--filter_high_counts_perc",
-                    type=float,
-                    default=0,
-                    help="Percentage of reads to filter out")
-parser.add_argument("--remove-all-zeros-loci", default=False,
-                    action="store_true",
-                    help="If provided, all non-interacting loci will be "
-                         "removed prior to the filtering strategy chosen.")
-parser.add_argument("--max_iter", "-m", default=100, type=int,
-                    help="Maximum number of iterations")
-parser.add_argument("--eps", "-e", default=0.1, type=float,
-                    help="Precision")
-parser.add_argument("--dense", "-d", default=False, action="store_true")
-parser.add_argument("--output-bias", "-b", default=False, help="Output the bias vector")
-parser.add_argument("--verbose", "-v", default=False)
-
-
-args = parser.parse_args()
-filename = args.filename
-
-# Deprecating filtering_perc option
-filter_low_counts = None
-if "--filtering_perc" in sys.argv:
-    DeprecationWarning(
-        "Option '--filtering_perc' is deprecated. Please use "
-        "'--filter_low_counts_perc' instead.'")
-    # And print it again because deprecation warnings are not displayed for
-    # recent versions of python
-    print "--filtering_perc is deprecated. Please use filter_low_counts_perc"
-    print "instead. This option will be removed in ice 0.3"
-    filter_low_counts = args.filtering_perc
-if "--filter_low_counts_perc" in sys.argv and "--filtering_perc" in sys.argv:
-    raise Warning("This two options are incompatible")
-if "--filtering_perc" is None and "--filter_low_counts_perc" not in sys.argv:
-    filter_low_counts_perc = 0.02
-elif args.filter_low_counts_perc is not None:
-    filter_low_counts_perc = args.filter_low_counts_perc
-
-if args.verbose:
-    print("Using iced version %s" % iced.__version__)
-    print "Loading files..."
-
-# Loads file as i, j, counts
-i, j, data = loadtxt(filename).T
-
-# Detecting whether the file is 0 or 1 based.
-if min(i.min(), j.min()) == 0:
-    index_base = 0
-    N = max(i.max(), j.max()) + 1
-    counts = sparse.coo_matrix((data, (i, j)), shape=(N, N), dtype=float)
-else:
-    index_base = 1
-    N = max(i.max(), j.max())
-    counts = sparse.coo_matrix((data, (i - 1, j - 1)), shape=(N, N), dtype=float)
-
-if args.dense:
-    counts = np.array(counts.todense())
-else:
-    counts = sparse.csr_matrix(counts)
-
-if args.verbose:
-    print "Normalizing..."
-
-if filter_low_counts_perc != 0:
-    counts = iced.filter.filter_low_counts(counts,
-                                           percentage=filter_low_counts_perc,
-                                           remove_all_zeros_loci=args.remove_all_zeros_loci,
-                                           copy=False, sparsity=False, verbose=args.verbose)
-if args.filter_high_counts_perc != 0:
-    counts = iced.filter.filter_high_counts(
-        counts,
-        percentage=args.filter_high_counts_perc,
-        copy=False)
-
-counts, bias = iced.normalization.ICE_normalization(
-    counts, max_iter=args.max_iter, copy=False,
-    verbose=args.verbose, eps=args.eps, output_bias=True)
-
-if args.results_filename is None:
-    results_filename = ".".join(
-        filename.split(".")[:-1]) + "_normalized." + filename.split(".")[-1]
-else:
-    results_filename = args.results_filename
-
-counts = sparse.coo_matrix(counts)
-
-if args.verbose:
-    print "Writing results..."
-
-savetxt(
-    results_filename, counts.col + index_base, counts.row + index_base, counts.data)
-
-
-if args.output_bias:
-    np.savetxt(results_filename + ".biases", bias)
diff --git a/bin/mapped_2hic_dnase.py b/bin/mapped_2hic_dnase.py
index 36c5a605d0001de3775bb70e7934d06be7145797..dd023b0023e0c0a7aa4780bcc04289e467ed877b 100755
--- a/bin/mapped_2hic_dnase.py
+++ b/bin/mapped_2hic_dnase.py
@@ -21,14 +21,14 @@ import pysam
 
 def usage():
     """Usage function"""
-    print "Usage : python mapped_2hic_dnase.py"
-    print "-r/--mappedReadsFile <BAM/SAM file of mapped reads>"
-    print "[-o/--outputDir] <Output directory. Default is current directory>"
-    print "[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>"
-    print "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>"
-    print "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>"
-    print "[-v/--verbose] <Verbose>"
-    print "[-h/--help] <Help>"
+    print("Usage : python mapped_2hic_dnase.py")
+    print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
+    print("[-o/--outputDir] <Output directory. Default is current directory>")
+    print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
+    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
+    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
     return
 
 
@@ -78,11 +78,11 @@ def get_read_pos(read, st="start"):
         list of aligned reads
     """
     if st == "middle":
-        pos = read.pos + int(read.alen/2)
+        pos = read.reference_start + int(read.alen/2)
     elif st =="start":
         pos = get_read_start(read)
     elif st == "left":
-        pos = read.pos
+        pos = read.reference_start
 
     return pos
 
@@ -92,9 +92,9 @@ def get_read_start(read):
     Return the 5' end of the read                                                                                                                                                                              
     """
     if read.is_reverse:
-        pos = read.pos + read.alen -1
+        pos = read.reference_start + read.alen -1
     else:
-        pos = read.pos
+        pos = read.reference_start
     return pos
 
 
@@ -108,20 +108,16 @@ def get_ordered_reads(read1, read2):
     read1 = [AlignedRead]
     read2 = [AlignedRead]
     """
-    if read1.tid == read2.tid:
+    if read1.reference_id == read2.reference_id:
         if get_read_pos(read1) < get_read_pos(read2):
-            r1 = read1
-            r2 = read2
+            r1, r2 = read1, read2
         else:
-            r1 = read2
-            r2 = read1
+            r1, r2 = read2, read1
     else:
-        if read1.tid < read2.tid:
-            r1 = read1
-            r2 = read2
+        if read1.reference_id < read2.reference_id:
+            r1, r2 = read1, read2
         else:
-            r1 = read2
-            r2 = read1
+            r1, r2 = read2, read1
 
     return r1, r2
 
@@ -134,7 +130,7 @@ def isIntraChrom(read1, read2):
     read2 : [AlignedRead]
 
     """
-    if read1.tid == read2.tid:
+    if read1.reference_id == read2.reference_id:
         return True
     else:
         return False
@@ -187,7 +183,7 @@ def get_cis_dist(read1, read2):
 
 
 def get_read_tag(read, tag):
-    for t in read.tags:
+    for t in read.get_tags():
         if t[0] == tag:
             return t[1]
     return None
@@ -229,11 +225,11 @@ if __name__ == "__main__":
 
     # Verbose mode
     if verbose:
-        print "## overlapMapped2HiCFragments.py"
-        print "## mappedReadsFile=", mappedReadsFile
-        print "## minCisDist=", minDist
-        print "## allOuput=", allOutput
-        print "## verbose=", verbose, "\n"
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## minCisDist=", minDist)
+        print("## allOutput=", allOutput)
+        print("## verbose={}\n".format(verbose))
 
     # Initialize variables
     reads_counter = 0
@@ -271,7 +267,7 @@ if __name__ == "__main__":
 
     # Read the SAM/BAM file
     if verbose:
-        print "## Opening SAM/BAM file '", mappedReadsFile, "'..."
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
     samfile = pysam.Samfile(mappedReadsFile, "rb")
 
     # Reads are 0-based too (for both SAM and BAM format)
@@ -286,7 +282,7 @@ if __name__ == "__main__":
         if read.is_read1:
             r1 = read
             if not r1.is_unmapped:
-                r1_chrom = samfile.getrname(r1.tid)
+                r1_chrom = samfile.get_reference_name(r1.reference_id)
             else:
                 r1_chrom = None
 
@@ -294,11 +290,11 @@ if __name__ == "__main__":
         elif read.is_read2:
             r2 = read
             if not r2.is_unmapped:
-                r2_chrom = samfile.getrname(r2.tid)
+                r2_chrom = samfile.get_reference_name(r2.reference_id)
             else:
                 r2_chrom = None
 
-            if isIntraChrom(r1,r2):
+            if isIntraChrom(r1, r2):
                 dist = get_cis_dist(r1, r2)
             else:
                 dist = None
@@ -368,8 +364,8 @@ if __name__ == "__main__":
                     
                     ##reorient reads to ease duplicates removal
                     or1, or2 = get_ordered_reads(r1, r2)
-                    or1_chrom = samfile.getrname(or1.tid)
-                    or2_chrom = samfile.getrname(or2.tid)
+                    or1_chrom = samfile.get_reference_name(or1.reference_id)
+                    or2_chrom = samfile.get_reference_name(or2.reference_id)
 
                     ##reset as tag now that the reads are oriented
                     r1as = get_read_tag(or1, gtag)
@@ -378,7 +374,7 @@ if __name__ == "__main__":
                         htag = str(r1as)+"-"+str(r2as)
                         
                     cur_handler.write(
-                        or1.qname + "\t" +
+                        or1.query_name + "\t" +
                         or1_chrom + "\t" +
                         str(get_read_pos(or1)+1) + "\t" +
                         str(get_read_strand(or1)) + "\t" +
@@ -394,7 +390,7 @@ if __name__ == "__main__":
                 
                 elif r2.is_unmapped and not r1.is_unmapped:
                     cur_handler.write(
-                        r1.qname + "\t" +
+                        r1.query_name + "\t" +
                         r1_chrom + "\t" +
                         str(get_read_pos(r1)+1) + "\t" +
                         str(get_read_strand(r1)) + "\t" +
@@ -408,7 +404,7 @@ if __name__ == "__main__":
                         "*" + "\n")
                 elif r1.is_unmapped and not r2.is_unmapped:
                     cur_handler.write(
-                        r2.qname + "\t" +
+                        r2.query_name + "\t" +
                         "*" + "\t" +
                         "*" + "\t" +
                         "*" + "\t" +
@@ -422,7 +418,7 @@ if __name__ == "__main__":
                         str(r2.mapping_quality) + "\n")
 
             if (reads_counter % 100000 == 0 and verbose):
-                print "##", reads_counter
+                print("##", reads_counter)
 
     # Close handler
     handle_valid.close()
@@ -432,33 +428,28 @@ if __name__ == "__main__":
         handle_filt.close()
 
     # Write stats file
-    handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
-    handle_stat.write("## Hi-C processing - no restriction fragments\n")
-    handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
-    handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
-    handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
-    handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
+    with open(outputDir + '/' + baseReadsFile + '.RSstat', 'w') as handle_stat:
+        handle_stat.write("## Hi-C processing - no restriction fragments\n")
+        handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+        handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
+        handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
+        handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
 
     ## Write AS report
-    if gtag is not None:
-        handle_stat.write("## ======================================\n")
-        handle_stat.write("## Allele specific information\n")
-        handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
-        handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
-
-    handle_stat.close()
+        if gtag is not None:
+            handle_stat.write("## ======================================\n")
+            handle_stat.write("## Allele specific information\n")
+            handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
+
 
 
diff --git a/bin/mapped_2hic_fragments.py b/bin/mapped_2hic_fragments.py
index efa32e6d681c6fc1cbf60843bc588f1371de30f1..e823ee02cce862b704c2b6939d1642db579665be 100755
--- a/bin/mapped_2hic_fragments.py
+++ b/bin/mapped_2hic_fragments.py
@@ -12,7 +12,6 @@
 Script to keep only valid 3C products - DE and SC are removed
 Output is : readname / 
 """
-
 import time
 import getopt
 import sys
@@ -24,20 +23,20 @@ from bx.intervals.intersection import Intersecter, Interval
 
 def usage():
     """Usage function"""
-    print "Usage : python mapped_2hic_fragments.py"
-    print "-f/--fragmentFile <Restriction fragment file GFF3>"
-    print "-r/--mappedReadsFile <BAM/SAM file of mapped reads>"
-    print "[-o/--outputDir] <Output directory. Default is current directory>"
-    print "[-s/--shortestInsertSize] <Shortest insert size of mapped reads to consider>"
-    print "[-l/--longestInsertSize] <Longest insert size of mapped reads to consider>"
-    print "[-t/--shortestFragmentLength] <Shortest restriction fragment length to consider>"
-    print "[-m/--longestFragmentLength] <Longest restriction fragment length to consider>"
-    print "[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>"
-    print "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>"
-    print "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>"
-    print "[-S/--sam] <Output an additional SAM file with flag 'CT' for pairs classification>"
-    print "[-v/--verbose] <Verbose>"
-    print "[-h/--help] <Help>"
+    print("Usage : python mapped_2hic_fragments.py")
+    print("-f/--fragmentFile <Restriction fragment file GFF3>")
+    print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
+    print("[-o/--outputDir] <Output directory. Default is current directory>")
+    print("[-s/--shortestInsertSize] <Shortest insert size of mapped reads to consider>")
+    print("[-l/--longestInsertSize] <Longest insert size of mapped reads to consider>")
+    print("[-t/--shortestFragmentLength] <Shortest restriction fragment length to consider>")
+    print("[-m/--longestFragmentLength] <Longest restriction fragment length to consider>")
+    print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
+    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
+    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print("[-S/--sam] <Output an additional SAM file with flag 'CT' for pairs classification>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
     return
 
 
@@ -53,7 +52,7 @@ def get_args():
              "minInsertSize=", "maxInsertSize", 
              "minFragSize", "maxFragSize", 
              "minDist",
-             "gatg", "samOut", "verbose", "all", "help"])
+             "gtag=", "sam", "verbose", "all", "help"])
     except getopt.GetoptError:
         usage()
         sys.exit(-1)
@@ -67,7 +66,7 @@ def timing(function, *args):
     """
     startTime = time.time()
     result = function(*args)
-    print '%s function took %0.3f ms' % (function.func_name, (time.time() - startTime) * 1000)
+    print('{} function took {:.3f} ms'.format(function.__name__, (time.time() - startTime) * 1000))
     return result
 
 
@@ -96,8 +95,7 @@ def isIntraChrom(read1, read2):
     """
     if read1.tid == read2.tid:
         return True
-    else:
-        return False
+    return False
 
 
 def get_cis_dist(read1, read2):
@@ -114,8 +112,7 @@ def get_cis_dist(read1, read2):
      if not read1.is_unmapped and not read2.is_unmapped:         
          ## Contact distances can be calculated for intrachromosomal reads only
          if isIntraChrom(read1, read2):
-             r1pos = get_read_pos(read1)
-             r2pos = get_read_pos(read2)
+             r1pos, r2pos = get_read_pos(read1), get_read_pos(read2)
              dist = abs(r1pos - r2pos)
      return dist
 
@@ -138,11 +135,11 @@ def get_read_pos(read, st="start"):
     """
 
     if st == "middle":
-        pos = read.pos + int(read.alen/2)
+        pos = read.reference_start + int(read.alen/2)
     elif st =="start":
         pos = get_read_start(read)
     elif st == "left":
-        pos = read.pos
+        pos = read.reference_start
     
     return pos
 
@@ -152,9 +149,9 @@ def get_read_start(read):
     Return the 5' end of the read
     """
     if read.is_reverse:
-        pos = read.pos + read.alen -1
+        pos = read.reference_start + read.alen -1
     else:
-        pos = read.pos
+        pos = read.reference_start
     return pos
 
 def get_ordered_reads(read1, read2):
@@ -178,18 +175,14 @@ def get_ordered_reads(read1, read2):
     """
     if read1.tid == read2.tid:
         if get_read_pos(read1) < get_read_pos(read2):
-            r1 = read1
-            r2 = read2
+            r1, r2 = read1, read2
         else:
-            r1 = read2
-            r2 = read1
+            r1, r2 = read2, read1
     else:
         if read1.tid < read2.tid:
-            r1 = read1
-            r2 = read2
+            r1, r2 = read1, read2
         else:
-            r1 = read2
-            r2 = read1
+            r1, r2 = read2, read1
                 
     return r1, r2
 
@@ -206,46 +199,44 @@ def load_restriction_fragment(in_file, minfragsize=None, maxfragsize=None, verbo
     """
     resFrag = {}
     if verbose:
-        print "## Loading Restriction File Intervals '", in_file, "'..."
-
+        print("## Loading Restriction File Intervals {} ...".format(in_file))
     bed_handle = open(in_file)
     nline = 0
     nfilt = 0
     for line in bed_handle:
-        nline +=1
-        bedtab = line.split("\t")
-        try:
-            chromosome, start, end, name = bedtab[:4]
-        except ValueError:
-            print "Warning : wrong input format in line", nline,". Not a BED file !?"
-            continue
+        nline += 1
+        bedtab = line.split("\t")
+        try:
+            chromosome, start, end, name = bedtab[:4]
+        except ValueError:
+            print("Warning : wrong input format in line {}. Not a BED file?!".format(nline))
+            continue
 
         # BED files are zero-based as Intervals objects
-        start = int(start)  # + 1
-        end = int(end)
-        fragl = abs(end - start)
-        name = name.strip()
-
-        ## Discard fragments outside the size range
-        filt=False
-        if minfragsize != None and int(fragl) < int(minfragsize):
-            nfilt+=1
-            filt=True
-        elif maxfragsize != None and int(fragl) > int(maxfragsize):
-            nfilt+=1
-            filt=True
+        start = int(start)  # + 1
+        end = int(end)
+        fragl = abs(end - start)
+        name = name.strip()
+
+        ## Discard fragments outside the size range
+        filt = False
+        if minfragsize is not None and int(fragl) < int(minfragsize):
+            nfilt += 1
+            filt = True
+        elif maxfragsize is not None and int(fragl) > int(maxfragsize):
+            nfilt += 1
+            filt = True
        
-        if chromosome in resFrag:
-            tree = resFrag[chromosome]
-            tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
-        else:
-            tree = Intersecter()
-            tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
-            resFrag[chromosome] = tree
+        if chromosome in resFrag:
+            tree = resFrag[chromosome]
+            tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
+        else:
+            tree = Intersecter()
+            tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
+            resFrag[chromosome] = tree
     
     if nfilt > 0:
-        print "Warning : ", nfilt ,"fragment(s) outside of range and discarded. ", nline - nfilt, " remaining."
-
+        print("Warning : {} fragment(s) outside of range and discarded. {} remaining.".format(nfilt, nline - nfilt))
     bed_handle.close()
     return resFrag
 
@@ -260,22 +251,22 @@ def get_overlapping_restriction_fragment(resFrag, chrom, read):
     read = the read to intersect [AlignedRead]
 
     """
-    # Get read position (middle or 5' end)
+    # Get read position (middle or start)
     pos = get_read_pos(read, st="middle")
     
     if chrom in resFrag:
         # Overlap with the position of the read (zero-based)
         resfrag = resFrag[chrom].find(pos, pos+1)
         if len(resfrag) > 1:
-            print "Warning : ", len(resfrag), " restriction fragments found for ", read.qname, "- skipped"
+            print("Warning : {} restriction fragments found for {} - skipped".format(len(resfrag), read.query_name))
             return None
         elif len(resfrag) == 0:
-            print "Warning - no restriction fragments for ", read.qname ," at ", chrom, ":", pos
+            print("Warning - no restriction fragments for {} at {} : {}".format(read.query_name, chrom, pos))
             return None
         else:
             return resfrag[0]
     else:
-        print "Warning - no restriction fragments for ", read.qname," at ", chrom, ":", pos
+        print("Warning - no restriction fragments for {} at {} : {}".format(read.query_name, chrom, pos))
         return None
 
 
@@ -301,11 +292,11 @@ def is_religation(read1, read2, frag1, frag2):
     Check the orientation of reads -><-
 
     """
-    ret=False
+    ret = False
     if are_contiguous_fragments(frag1, frag2, read1.tid, read2.tid):
         #r1, r2 = get_ordered_reads(read1, read2)
         #if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
-        ret=True
+        ret = True
     return ret
 
 
@@ -374,8 +365,8 @@ def get_PE_fragment_size(read1, read2, resFrag1, resFrag2, interactionType):
 
     read1 : [AlignedRead]
     read2 : [AlignedRead]
-    resfrag1 = restrictin fragment overlapping the R1 read [interval]
-    resfrag1 = restrictin fragment overlapping the R1 read [interval]
+    resfrag1 = restriction fragment overlapping the R1 read [interval]
+    resfrag2 = restriction fragment overlapping the R2 read [interval]
     interactionType : Type of interaction from get_interaction_type() [str]
 
     """
@@ -442,7 +433,7 @@ def get_interaction_type(read1, read1_chrom, resfrag1, read2,
     # If returned InteractionType=None -> Same restriction fragment
     # and same strand = Dump
     interactionType = None
- 
+
     if not read1.is_unmapped and not read2.is_unmapped and resfrag1 is not None and resfrag2 is not None:
         # same restriction fragment
         if resfrag1 == resfrag2:
@@ -463,7 +454,7 @@ def get_interaction_type(read1, read1_chrom, resfrag1, read2,
 
 
 def get_read_tag(read, tag):
-    for t in read.tags:
+    for t in read.get_tags():
         if t[0] == tag:
             return t[1]
     return None
@@ -501,9 +492,9 @@ if __name__ == "__main__":
             minInsertSize = arg
         elif opt in ("-l", "--longestInsertSize"):
             maxInsertSize = arg
-        elif opt in ("-t", "--shortestFragmentSize"):
+        elif opt in ("-t", "--shortestFragmentLength"):
             minFragSize = arg
-        elif opt in ("-m", "--longestFragmentSize"):
+        elif opt in ("-m", "--longestFragmentLength"):
             maxFragSize = arg
         elif opt in ("-d", "--minCisDist"):
             minDist = arg
@@ -520,16 +511,16 @@ if __name__ == "__main__":
 
     # Verbose mode
     if verbose:
-        print "## overlapMapped2HiCFragments.py"
-        print "## mappedReadsFile=", mappedReadsFile
-        print "## fragmentFile=", fragmentFile
-        print "## minInsertSize=", minInsertSize
-        print "## maxInsertSize=", maxInsertSize
-        print "## minFragSize=", minFragSize
-        print "## maxFragSize=", maxFragSize
-        print "## allOuput=", allOutput
-        print "## SAM ouput=", samOut
-        print "## verbose=", verbose, "\n"
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## fragmentFile=", fragmentFile)
+        print("## minInsertSize=", minInsertSize)
+        print("## maxInsertSize=", maxInsertSize)
+        print("## minFragSize=", minFragSize)
+        print("## maxFragSize=", maxFragSize)
+    print("## allOutput=", allOutput)
+    print("## SAM output=", samOut)
+        print("## verbose={}\n".format(verbose))
 
     # Initialize variables
     reads_counter = 0
@@ -576,7 +567,7 @@ if __name__ == "__main__":
      
     # Read the SAM/BAM file
     if verbose:
-        print "## Opening SAM/BAM file '", mappedReadsFile, "'..."
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
     samfile = pysam.Samfile(mappedReadsFile, "rb")
 
     if samOut:
@@ -585,7 +576,7 @@ if __name__ == "__main__":
     # Reads are 0-based too (for both SAM and BAM format)
     # Loop on all reads
     if verbose:
-        print "## Classifying Interactions ..."
+        print("## Classifying Interactions ...")
 
     for read in samfile.fetch(until_eof=True):
         reads_counter += 1
@@ -596,7 +587,7 @@ if __name__ == "__main__":
         if read.is_read1:
             r1 = read
             if not r1.is_unmapped:
-                r1_chrom = samfile.getrname(r1.tid)
+                r1_chrom = samfile.get_reference_name(r1.tid)
                 r1_resfrag = get_overlapping_restriction_fragment(resFrag, r1_chrom, r1)
             else:
                 r1_resfrag = None
@@ -606,13 +597,14 @@ if __name__ == "__main__":
         elif read.is_read2:
             r2 = read
             if not r2.is_unmapped:
-                r2_chrom = samfile.getrname(r2.tid)
+                r2_chrom = samfile.get_reference_name(r2.tid)
                 r2_resfrag = get_overlapping_restriction_fragment(resFrag, r2_chrom, r2)
             else:
                 r2_resfrag = None
                 r2_chrom = None
 
             if r1_resfrag is not None or r2_resfrag is not None:
+
                 interactionType = get_interaction_type(r1, r1_chrom, r1_resfrag, r2, r2_chrom, r2_resfrag, verbose)
                 dist = get_PE_fragment_size(r1, r2, r1_resfrag, r2_resfrag, interactionType)
                 cdist = get_cis_dist(r1, r2)
@@ -705,8 +697,8 @@ if __name__ == "__main__":
                 if not r1.is_unmapped and not r2.is_unmapped:                 
                     ##reorient reads to ease duplicates removal
                     or1, or2 = get_ordered_reads(r1, r2)
-                    or1_chrom = samfile.getrname(or1.tid)
-                    or2_chrom = samfile.getrname(or2.tid)
+                    or1_chrom = samfile.get_reference_name(or1.tid)
+                    or2_chrom = samfile.get_reference_name(or2.tid)
                     
                     ##reset as tag now that the reads are oriented
                     r1as = get_read_tag(or1, gtag)
@@ -724,12 +716,16 @@ if __name__ == "__main__":
 
                     if or1_resfrag is not None:
                         or1_fragname = or1_resfrag.value['name']
-                    
+                    else:
+                        or1_fragname = 'None'
+
                     if or2_resfrag is not None:
                         or2_fragname = or2_resfrag.value['name']
-                    
+                    else:
+                        or2_fragname = 'None'
+
                     cur_handler.write(
-                        or1.qname + "\t" +
+                        or1.query_name + "\t" +
                         or1_chrom + "\t" +
                         str(get_read_pos(or1)+1) + "\t" +
                         str(get_read_strand(or1)) + "\t" +
@@ -748,7 +744,7 @@ if __name__ == "__main__":
                         r1_fragname = r1_resfrag.value['name']
                           
                     cur_handler.write(
-                        r1.qname + "\t" +
+                        r1.query_name + "\t" +
                         r1_chrom + "\t" +
                         str(get_read_pos(r1)+1) + "\t" +
                         str(get_read_strand(r1)) + "\t" +
@@ -765,7 +761,7 @@ if __name__ == "__main__":
                         r2_fragname = r2_resfrag.value['name']
                     
                     cur_handler.write(
-                        r2.qname + "\t" +
+                        r2.query_name + "\t" +
                         "*" + "\t" +
                         "*" + "\t" +
                         "*" + "\t" +
@@ -786,7 +782,7 @@ if __name__ == "__main__":
                     handle_sam.write(r2)
 
             if (reads_counter % 100000 == 0 and verbose):
-                print "##", reads_counter
+                print("##", reads_counter)
 
     # Close handler
     handle_valid.close()
@@ -803,14 +799,10 @@ if __name__ == "__main__":
     handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
     handle_stat.write("## Hi-C processing\n")
     handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
-    handle_stat.write(
-        "Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
     handle_stat.write("Dangling_end_pairs\t" + str(de_counter) + "\n")
     handle_stat.write("Religation_pairs\t" + str(re_counter) + "\n")
     handle_stat.write("Self_Cycle_pairs\t" + str(sc_counter) + "\n")
@@ -834,4 +826,3 @@ if __name__ == "__main__":
 
     if samOut:
         samfile.close()
-
diff --git a/bin/markdown_to_html.py b/bin/markdown_to_html.py
new file mode 100755
index 0000000000000000000000000000000000000000..57cc4263fe4182373949388b5aa88e20d60a3c70
--- /dev/null
+++ b/bin/markdown_to_html.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+from __future__ import print_function
+import argparse
+import markdown
+import os
+import sys
+
+def convert_markdown(in_fn):
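+    """Render the Markdown file to HTML, prepending a [TOC] marker so the toc extension builds a table of contents."""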
+    input_md = open(in_fn, mode="r", encoding="utf-8").read()
+    html = markdown.markdown(
+        "[TOC]\n" + input_md,
+        extensions = [
+            'pymdownx.extra',
+            'pymdownx.b64',
+            'pymdownx.highlight',
+            'pymdownx.emoji',
+            'pymdownx.tilde',
+            'toc'
+        ],
+        extension_configs = {
+            'pymdownx.b64': {
+                'base_path': os.path.dirname(in_fn)
+            },
+            'pymdownx.highlight': {
+                'noclasses': True
+            },
+            'toc': {
+                'title': 'Table of Contents'
+            }
+        }
+    )
+    return html
+
+def wrap_html(contents):
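+    """Wrap the converted HTML body in a minimal Bootstrap page, with styling for the floating table of contents."""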
+    header = """<!DOCTYPE html><html>
+    <head>
+        <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
+        <style>
+            body {
+              font-family: -apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";
+              padding: 3em;
+              margin-right: 350px;
+              max-width: 100%;
+            }
+            .toc {
+              position: fixed;
+              right: 20px;
+              width: 300px;
+              padding-top: 20px;
+              overflow: scroll;
+              height: calc(100% - 3em - 20px);
+            }
+            .toctitle {
+              font-size: 1.8em;
+              font-weight: bold;
+            }
+            .toc > ul {
+              padding: 0;
+              margin: 1rem 0;
+              list-style-type: none;
+            }
+            .toc > ul ul { padding-left: 20px; }
+            .toc > ul > li > a { display: none; }
+            img { max-width: 800px; }
+            pre {
+              padding: 0.6em 1em;
+            }
+            h2 {
+
+            }
+        </style>
+    </head>
+    <body>
+    <div class="container">
+    """
+    footer = """
+    </div>
+    </body>
+    </html>
+    """
+    return header + contents + footer
+
+
+def parse_args(args=None):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('mdfile', type=argparse.FileType('r'), nargs='?',
+                        default=sys.stdin,
+                        help='File to convert. Defaults to stdin.')
+    parser.add_argument('-o', '--out', type=argparse.FileType('w'),
+                        default=sys.stdout,
+                        help='Output file name. Defaults to stdout.')
+    return parser.parse_args(args)
+
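+# Example usage (illustrative paths): ./markdown_to_html.py docs/output.md -o docs/output.html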
+def main(args=None):
+    args = parse_args(args)
+    converted_md = convert_markdown(args.mdfile)
+    html = wrap_html(converted_md)
+    args.out.write(html)
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/bin/markdown_to_html.r b/bin/markdown_to_html.r
deleted file mode 100755
index abe1335070d84f0d9a17dae7b6d482341f7f59a8..0000000000000000000000000000000000000000
--- a/bin/markdown_to_html.r
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env Rscript
-
-# Command line argument processing
-args = commandArgs(trailingOnly=TRUE)
-if (length(args) < 2) {
-  stop("Usage: markdown_to_html.r <input.md> <output.html>", call.=FALSE)
-}
-markdown_fn <- args[1]
-output_fn <- args[2]
-
-# Load / install packages
-if (!require("markdown")) {
-  install.packages("markdown", dependencies=TRUE, repos='http://cloud.r-project.org/')
-  library("markdown")
-}
-
-base_css_fn <- getOption("markdown.HTML.stylesheet")
-base_css <- readChar(base_css_fn, file.info(base_css_fn)$size)
-custom_css <-  paste(base_css, "
-body {
-  padding: 3em;
-  margin-right: 350px;
-  max-width: 100%;
-}
-#toc {
-  position: fixed;
-  right: 20px;
-  width: 300px;
-  padding-top: 20px;
-  overflow: scroll;
-  height: calc(100% - 3em - 20px);
-}
-#toc_header {
-  font-size: 1.8em;
-  font-weight: bold;
-}
-#toc > ul {
-  padding-left: 0;
-  list-style-type: none;
-}
-#toc > ul ul { padding-left: 20px; }
-#toc > ul > li > a { display: none; }
-img { max-width: 800px; }
-")
-
-markdownToHTML(
-  file = markdown_fn,
-  output = output_fn,
-  stylesheet = custom_css,
-  options = c('toc', 'base64_images', 'highlight_code')
-)
diff --git a/bin/mergeSAM.py b/bin/mergeSAM.py
index fdf0c67dfc24f161266c48506bdfda6b3eb7c899..12917b16277a0a768269f611cd13422bccbe98a1 100755
--- a/bin/mergeSAM.py
+++ b/bin/mergeSAM.py
@@ -19,20 +19,19 @@ import sys
 import os
 import re
 import pysam
-from itertools import izip
 
 def usage():
     """Usage function"""
-    print "Usage : python mergeSAM.py"
-    print "-f/--forward <forward read mapped file>"
-    print "-r/--reverse <reverse read mapped file>"
-    print "[-o/--output] <Output file. Default is stdin>"
-    print "[-s/--single] <report singleton>"
-    print "[-m/--multi] <report multiple hits>"
-    print "[-q/--qual] <minimum reads mapping quality>"
-    print "[-t/--stat] <generate a stat file>"
-    print "[-v/--verbose] <Verbose>"
-    print "[-h/--help] <Help>"
+    print("Usage : python mergeSAM.py")
+    print("-f/--forward <forward read mapped file>")
+    print("-r/--reverse <reverse read mapped file>")
+    print("[-o/--output] <Output file. Default is stdin>")
+    print("[-s/--single] <report singleton>")
+    print("[-m/--multi] <report multiple hits>")
+    print("[-q/--qual] <minimum reads mapping quality>")
+    print("[-t/--stat] <generate a stat file>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
     return
 
 
@@ -53,37 +52,36 @@ def get_args():
 
 
 def is_unique_bowtie2(read):
-    ret = False
-    if not read.is_unmapped and read.has_tag('AS'):
-        if read.has_tag('XS'):
-            primary =  read.get_tag('AS')
-            secondary = read.get_tag('XS')
-            if (primary > secondary):
-                ret = True
-        else:
-            ret = True
-    
-    return ret
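+    ## Bowtie2 stores the score of the reported alignment in the 'AS' tag and
+    ## the score of the best alternative alignment in 'XS'. A hit is unique if
+    ## 'XS' is absent, or if 'AS' is strictly greater than 'XS'.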
+    ret = False
+    if not read.is_unmapped and read.has_tag('AS'):
+        if read.has_tag('XS'):
+            primary = read.get_tag('AS')
+            secondary = read.get_tag('XS')
+            if primary > secondary:
+                ret = True
+        else:
+            ret = True
+    return ret
 
 ## Remove everything after "/" or " " in read's name
 def get_read_name(read):
-    name = read.qname
+    name = read.query_name
     #return name.split("/",1)[0]
     return re.split('/| ', name)[0]
 
 def sam_flag(read1, read2, hr1, hr2):
+    f1 = read1.flag
+    f2 = read2.flag
 
-    f1 = read1.flag
-    f2 = read2.flag
-
-    if r1.is_unmapped == False:
-        r1_chrom = hr1.getrname(r1.tid)
-    else:
-        r1_chrom="*"
-    if r2.is_unmapped == False:
-        r2_chrom = hr2.getrname(r2.tid)
-    else:
-        r2_chrom="*"
+    if not read1.is_unmapped:
+        r1_chrom = hr1.get_reference_name(read1.reference_id)
+    else:
+        r1_chrom = "*"
+    if not read2.is_unmapped:
+        r2_chrom = hr2.get_reference_name(read2.reference_id)
+    else:
+        r2_chrom = "*"
 
 
   ##Relevant bitwise flags (flag in an 11-bit binary number)
@@ -101,226 +99,221 @@ def sam_flag(read1, read2, hr1, hr2):
   ##Output example: a paired-end read that aligns to the reverse strand 
   ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
   
-    if f1 & 0x4:
-        f1 = f1 | 0x8
+    if f1 & 0x4:
+        f1 |= 0x8
 
-    if f2 & 0x4:
-        f2 = f2 | 0x8
+    if f2 & 0x4:
+        f2 |= 0x8
     
-    if (not (f1 & 0x4) and not (f2 & 0x4)):
+    if not (f1 & 0x4) and not (f2 & 0x4):
     ##The flag should now indicate this is paired-end data
-        f1 = f1 | 0x1
-        f1 = f1 | 0x2
-        f2 = f2 | 0x1
-        f2 = f2 | 0x2
+        f1 |= 0x1
+        f1 |= 0x2
+        f2 |= 0x1
+        f2 |= 0x2
   
     
   ##Indicate if the pair is on the reverse strand
-    if f1 & 0x10:
-        f2 = f2 | 0x20
+    if f1 & 0x10:
+        f2 |= 0x20
   
-    if f2 & 0x10:
-        f1 = f1 | 0x20
+    if f2 & 0x10:
+        f1 |= 0x20
   
   ##Is this first or the second pair?
-    f1 = f1 | 0x40
-    f2 = f2 | 0x80
+    f1 |= 0x40
+    f2 |= 0x80
   
     ##Insert the modified bitwise flags into the reads
-    read1.flag = f1
-    read2.flag = f2
-
-    ##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
-    #RNEXT
-    if r1_chrom == r2_chrom:
-        read1.rnext = r1.tid
-        read2.rnext = r1.tid
-    else:
-        read1.rnext = r2.tid
-        read2.rnext = r1.tid
-   
-   #PNEXT
-    read1.pnext = read2.pos
-    read2.pnext = read1.pos
- 
-    return(read1, read2)
+    read1.flag = f1
+    read2.flag = f2
+
+    ##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
+    #RNEXT
+    if r1_chrom == r2_chrom:
+        read1.next_reference_id = read1.reference_id
+        read2.next_reference_id = read1.reference_id
+    else:
+        read1.next_reference_id = read2.reference_id
+        read2.next_reference_id = read1.reference_id
+    #PNEXT
+    read1.next_reference_start = read2.reference_start
+    read2.next_reference_start = read1.reference_start
+
+    return (read1, read2)
 
 
 
 if __name__ == "__main__":
     ## Read command line arguments
-    opts = get_args()
-    inputFile = None
-    outputFile = None
-    mapq = None
-    report_single = False
-    report_multi = False
-    verbose = False
-    stat = False
-    output = "-"
-
-    if len(opts) == 0:
-        usage()
-        sys.exit()
-
-    for opt, arg in opts:
-        if opt in ("-h", "--help"):
-            usage()
-            sys.exit()
-        elif opt in ("-f", "--forward"):
-            R1file = arg
-        elif opt in ("-r", "--reverse"):
-            R2file = arg
-        elif opt in ("-o", "--output"):
-            output = arg
-        elif opt in ("-q", "--qual"):
-            mapq = arg
-        elif opt in ("-s", "--single"):
-            report_single = True
-        elif opt in ("-m", "--multi"):
-            report_multi = True
-        elif opt in ("-t", "--stat"):
-            stat = True
-        elif opt in ("-v", "--verbose"):
-            verbose = True
-        else:
-            assert False, "unhandled option"
+    opts = get_args()
+    inputFile = None
+    outputFile = None
+    mapq = None
+    report_single = False
+    report_multi = False
+    verbose = False
+    stat = False
+    output = "-"
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-f", "--forward"):
+            R1file = arg
+        elif opt in ("-r", "--reverse"):
+            R2file = arg
+        elif opt in ("-o", "--output"):
+            output = arg
+        elif opt in ("-q", "--qual"):
+            mapq = arg
+        elif opt in ("-s", "--single"):
+            report_single = True
+        elif opt in ("-m", "--multi"):
+            report_multi = True
+        elif opt in ("-t", "--stat"):
+            stat = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
 
     ## Verbose mode
-    if verbose:
-        print "## mergeBAM.py"
-        print "## forward=", R1file
-        print "## reverse=", R2file
-        print "## output=", output
-        print "## min mapq=", mapq
-        print "## report_single=", report_single
-        print "## report_multi=", report_multi
-        print "## verbose=", verbose
+    if verbose:
+        print("## mergeSAM.py")
+        print("## forward=", R1file)
+        print("## reverse=", R2file)
+        print("## output=", output)
+        print("## min mapq=", mapq)
+        print("## report_single=", report_single)
+        print("## report_multi=", report_multi)
+        print("## verbose=", verbose)
 
     ## Initialize variables
-    tot_pairs_counter = 0
-    multi_pairs_counter = 0
-    uniq_pairs_counter = 0
-    unmapped_pairs_counter = 0 
-    lowq_pairs_counter = 0
-    multi_singles_counter = 0
-    uniq_singles_counter = 0
-    lowq_singles_counter = 0
+    tot_pairs_counter = 0
+    multi_pairs_counter = 0
+    uniq_pairs_counter = 0
+    unmapped_pairs_counter = 0
+    lowq_pairs_counter = 0
+    multi_singles_counter = 0
+    uniq_singles_counter = 0
+    lowq_singles_counter = 0
 
     #local_counter = 0
-    paired_reads_counter = 0
-    singleton_counter = 0
-    reads_counter = 0
-    r1 = None
-    r2 = None
+    paired_reads_counter = 0
+    singleton_counter = 0
+    reads_counter = 0
+    r1 = None
+    r2 = None
 
     ## Reads are 0-based too (for both SAM and BAM format)
     ## Loop on all reads
-    if verbose:
-        print "## Merging forward and reverse tags ..."
-  
-    with  pysam.Samfile(R1file, "rb") as hr1,  pysam.Samfile(R2file, "rb") as hr2: 
-        if output == "-":
-            outfile = pysam.AlignmentFile(output, "w", template=hr1)
-        else:
-            outfile = pysam.AlignmentFile(output, "wb", template=hr1)
-        for r1, r2 in izip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
-            reads_counter +=1
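+    ## The forward and reverse BAM files must contain the same reads in the
+    ## same order (e.g. name-sorted); pairs are rebuilt by walking both files
+    ## in lockstep.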
+    if verbose:
+        print("## Merging forward and reverse tags ...")
+
+    with pysam.AlignmentFile(R1file, "rb") as hr1, pysam.AlignmentFile(R2file, "rb") as hr2:
+        if output == "-":
+            outfile = pysam.AlignmentFile(output, "w", template=hr1)
+        else:
+            outfile = pysam.AlignmentFile(output, "wb", template=hr1)
+        for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
+            reads_counter += 1
 
             #print r1
             #print r2
             #print hr1.getrname(r1.tid)
             #print hr2.getrname(r2.tid)
 
-            if (reads_counter % 1000000 == 0 and verbose):
-                print "##", reads_counter
+            if reads_counter % 1000000 == 0 and verbose:
+                print("##", reads_counter)
                 
-            if get_read_name(r1) == get_read_name(r2):
+            if get_read_name(r1) == get_read_name(r2):
                     
                  ## both unmapped
-                if r1.is_unmapped == True and r2.is_unmapped == True:
-                    unmapped_pairs_counter += 1
-                    continue
+                if r1.is_unmapped and r2.is_unmapped:
+                    unmapped_pairs_counter += 1
+                    continue
 
                 ## both mapped
-                elif r1.is_unmapped == False and r2.is_unmapped == False:
+                elif not r1.is_unmapped and not r2.is_unmapped:
                      ## quality
-                    if mapq != None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
-                        lowq_pairs_counter += 1
-                        continue
+                    if mapq is not None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
+                        lowq_pairs_counter += 1
+                        continue
                  
                      ## Unique mapping
-                    if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
-                        uniq_pairs_counter += 1
-                    else:
-                        multi_pairs_counter += 1
-                        if report_multi == False:
-                            continue
+                    if is_unique_bowtie2(r1) and is_unique_bowtie2(r2):
+                        uniq_pairs_counter += 1
+                    else:
+                        multi_pairs_counter += 1
+                        if not report_multi:
+                            continue
                 # one end mapped, other is not
-                else:
-                    singleton_counter += 1
-                    if report_single == False:
-                        continue
-                    if r1.is_unmapped == False:  ## first end is mapped, second is not
+                else:
+                    singleton_counter += 1
+                    if not report_single:
+                        continue
+                    if not r1.is_unmapped:  ## first end is mapped, second is not
                          ## quality
-                        if mapq != None and (r1.mapping_quality < int(mapq)): 
-                            lowq_singles_counter += 1
-                            continue
+                        if mapq is not None and r1.mapping_quality < int(mapq):
+                            lowq_singles_counter += 1
+                            continue
                          ## Unique mapping
-                        if is_unique_bowtie2(r1) == True:
-                            uniq_singles_counter += 1
-                        else:
-                            multi_singles_counter += 1
-                            if report_multi == False:
-                                continue
-                    else:  ## second end is mapped, first is not
+                        if is_unique_bowtie2(r1):
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if not report_multi:
+                                continue
+                    else:  ## second end is mapped, first is not
                          ## quality
-                        if mapq != None and (r2.mapping_quality < int(mapq)): 
-                            lowq_singles_counter += 1
-                            continue
+                        if mapq is not None and r2.mapping_quality < int(mapq):
+                            lowq_singles_counter += 1
+                            continue
                          ## Unique mapping
-                        if is_unique_bowtie2(r2) == True:
-                            uniq_singles_counter += 1
-                        else:
-                            multi_singles_counter += 1
-                            if report_multi == False:
-                                continue
+                        if is_unique_bowtie2(r2):
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if not report_multi:
+                                continue
 
-                tot_pairs_counter += 1          
-                (r1, r2) = sam_flag(r1,r2, hr1, hr2)
+                tot_pairs_counter += 1
+                (r1, r2) = sam_flag(r1, r2, hr1, hr2)
 
                 #print hr1.getrname(r1.tid)
                 #print hr2.getrname(r2.tid)
                 #print r1
                 #print r2
                 ## Write output
-                outfile.write(r1)
-                outfile.write(r2)
-
-            else:
-                print "Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted."
-                sys.exit(1)
-
-    if stat:
-        if output == '-':
-            statfile = "pairing.stat"
-        else:
-            statfile = re.sub('\.bam$', '.pairstat', output)
-        handle_stat = open(statfile, 'w')
-            
-        handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")  
-        handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-        handle_stat.close()
-
-    hr1.close()
-    hr2.close()
-    outfile.close()
+                outfile.write(r1)
+                outfile.write(r2)
+
+            else:
+                print("Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted.")
+                sys.exit(1)
+
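+    ## Each stat line is: metric <TAB> count <TAB> percentage of processed pairs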
+    if stat:
+        if output == '-':
+            statfile = "pairing.stat"
+        else:
+            statfile = re.sub(r'\.bam$', '.pairstat', output)
+        with open(statfile, 'w') as handle_stat:
+            handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
+            handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+    outfile.close()
 
diff --git a/bin/merge_statfiles.py b/bin/merge_statfiles.py
index ab3d078c657e632471a47b4bc990aa16998cc781..469cacd81b597e296eb3eb5b4acdc500028be927 100755
--- a/bin/merge_statfiles.py
+++ b/bin/merge_statfiles.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
 
-## HiC-Pro
-## Copyright (c) 2015 Institut Curie                               
-## Author(s): Nicolas Servant, Eric Viara
+## nf-core-hic
+## Copyright (c) 2020 Institut Curie                               
+## Author(s): Nicolas Servant
 ## Contact: nicolas.servant@curie.fr
 ## This software is distributed without any guarantee under the terms of the BSD-3 licence.
 ## See the LICENCE file for details
@@ -36,13 +36,13 @@ if __name__ == "__main__":
 
     if li > 0:
         if args.verbose:
-            print "## merge_statfiles.py"
-            print "## Merging "+ str(li)+" files"
+            print("## merge_statfiles.py")
+            print("## Merging "+ str(li)+" files")
  
         ## Reading first file to get the template
         template = OrderedDict()
         if args.verbose:
-            print "## Use "+infiles[0]+" as template"
+            print("## Use "+infiles[0]+" as template")
         with open(infiles[0]) as f:
             for line in f:
                 if not line.startswith("#"):
@@ -51,17 +51,17 @@ if __name__ == "__main__":
                     template[str(lsp[0])] = data
                 
         if len(template) == 0:
-            print "Cannot find template files !"
+            print("Cannot find template files !")
             sys.exit(1)
 
         ## Int are counts / Float are percentage
-        for fidx in xrange(1, li):
+        for fidx in range(1, li):
             with open(infiles[fidx]) as f:
                 for line in f:
                     if not line.startswith("#"):
                         lsp = line.strip().split("\t")
                         if lsp[0] in template:
-                            for i in xrange(1, len(lsp)):
+                            for i in range(1, len(lsp)):
                                 if isinstance(num(lsp[i]), int):
                                     template[lsp[0]][i-1] += num(lsp[i])
                                 else:
@@ -77,6 +77,6 @@ if __name__ == "__main__":
             sys.stdout.write("\n")
 
     else:
-        print "No files to merge - stop"
+        print("No files to merge - stop")
         sys.exit(1)
 
diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py
index 7a38feec0135d37268ff82fa0c92dab46c84ac6a..d5f4c5c0095a2006ce4e7a876dc1ac849c020c3a 100755
--- a/bin/scrape_software_versions.py
+++ b/bin/scrape_software_versions.py
@@ -22,11 +22,19 @@ results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
 
 # Search each file using its regex
 for k, v in regexes.items():
-    with open(v[0]) as x:
-        versions = x.read()
-        match = re.search(v[1], versions)
-        if match:
-            results[k] = "v{}".format(match.group(1))
+    try:
+        with open(v[0]) as x:
+            versions = x.read()
+            match = re.search(v[1], versions)
+            if match:
+                results[k] = "v{}".format(match.group(1))
+    except IOError:
+        results[k] = False
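+        # the version file is missing or unreadable; flag the tool for removal below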
+
+# Remove software set to false in results
+for k in list(results):
+    if not results[k]:
+        del results[k]
 
-# Remove software set to false in results
-for k in results:
diff --git a/conf/awsbatch.config b/conf/awsbatch.config
deleted file mode 100644
index 14af5866f5c6c18db7e8d6b93b40da8ea8311721..0000000000000000000000000000000000000000
--- a/conf/awsbatch.config
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * -------------------------------------------------
- *  Nextflow config file for running on AWS batch
- * -------------------------------------------------
- * Base config needed for running with -profile awsbatch
- */
-params {
-  config_profile_name = 'AWSBATCH'
-  config_profile_description = 'AWSBATCH Cloud Profile'
-  config_profile_contact = 'Alexander Peltzer (@apeltzer)'
-  config_profile_url = 'https://aws.amazon.com/de/batch/'
-}
-
-aws.region = params.awsregion
-process.executor = 'awsbatch'
-process.queue = params.awsqueue
-executor.awscli = '/home/ec2-user/miniconda/bin/aws'
-params.tracedir = './'
diff --git a/conf/base.config b/conf/base.config
index 28b467901007da4efaf10945d3c2000644f69d90..157dd9548a110b9f2f710d3072850608fa9c2de5 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -10,68 +10,37 @@
  */
 
 process {
-
-  // Check the defaults for all processes
+  // nf-core: Check the defaults for all processes
   cpus = { check_max( 1 * task.attempt, 'cpus' ) }
-  memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-  time = { check_max( 2.h * task.attempt, 'time' ) }
+  memory = { check_max( 7.GB * task.attempt, 'memory' ) }
+  time = { check_max( 4.h * task.attempt, 'time' ) }
 
   errorStrategy = { task.exitStatus in [143,137,104,134,139] ? 'retry' : 'finish' }
   maxRetries = 1
   maxErrors = '-1'
 
-  // Process-specific resource requirements
-  withName:makeBowtie2Index {
-     cpus = { check_max( 1, 'cpus' ) }
-     memory = { check_max( 10.GB * task.attempt, 'memory' ) }
-     time = { check_max( 12.h * task.attempt, 'time' ) }
-  }
-  withName:bowtie2_end_to_end {
-    cpus = { check_max( 4, 'cpus' ) }
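+  // Generic resource tiers: processes opt in with `label 'process_*'`, and
+  // every value still passes through check_max() so the --max_cpus,
+  // --max_memory and --max_time caps are respected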
+  withLabel:process_low {
+    cpus = { check_max( 1 * task.attempt, 'cpus' ) }
     memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+    time = { check_max( 6.h * task.attempt, 'time' ) }
   }
-  withName:bowtie2_on_trimmed_reads {
-    cpus = { check_max( 4, 'cpus' ) }
-    memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
-  }
-  withName:merge_mapping_steps {
-    cpus = { check_max( 4, 'cpus' ) }
+  withLabel:process_medium {
+    cpus = { check_max( 4 * task.attempt, 'cpus' ) }
     memory = { check_max( 8.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+    time = { check_max( 8.h * task.attempt, 'time' ) }
   }
-  withName:trim_reads {
-    cpus = { check_max (1, 'cpus')}
-    memory = { check_max( 1.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+  withLabel:process_high {
+    cpus = { check_max( 8 * task.attempt, 'cpus' ) }
+    memory = { check_max( 64.GB * task.attempt, 'memory' ) }
+    time = { check_max( 10.h * task.attempt, 'time' ) }
   }
-  withName:combine_mapped_files {
-    cpus = { check_max( 1, 'cpus' ) }
-    memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+  withLabel:process_long {
+    time = { check_max( 20.h * task.attempt, 'time' ) }
   }
-  withName:get_valid_interaction {
-    cpus = { check_max( 1, 'cpus' ) }
-    memory = { check_max( 4.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
-  }
-  withName:build_contact_maps {
-    cpus = { check_max( 1, 'cpus' ) }
-    memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+  withLabel:process_highmem {
+    memory = { check_max( 12.GB * task.attempt, 'memory' ) }
   }
-  withName:run_ice {
-    cpus = { check_max( 1, 'cpus' ) }
-    memory = { check_max( 10.GB * task.attempt, 'memory' ) }
-    time = { check_max( 5.h * task.attempt, 'time' ) }
+  withName:get_software_versions {
+    cache = false
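+    // disable caching so the reported versions always reflect the current run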
   }
 }
-
-params {
-  // Defaults only, expecting to be overwritten
-  max_memory = 8.GB
-  max_cpus = 4
-  max_time = 24.h
-  igenomes_base = 's3://ngi-igenomes/igenomes/'
-}
diff --git a/conf/curie.config b/conf/curie.config
deleted file mode 100644
index ab85a2d9d778ac3ca875a273e9bbcb7eb966253d..0000000000000000000000000000000000000000
--- a/conf/curie.config
+++ /dev/null
@@ -1,16 +0,0 @@
-singularity {
-  enabled = false 
-}
-
-process {
-  executor = 'pbs'
-  queue = params.queue 
-  //beforeScript = 'export PATH=/bioinfo/pipelines/sandbox/dev/nfcore/rnaseq/modules/conda/envs/nf-core-rnaseq-1.2/bin:$PATH'
-}
-
-params {
-  clusterOptions = false
-  max_memory = 128.GB
-  max_cpus = 4
-  max_time = 240.h
-}
diff --git a/conf/hicpro.config b/conf/hicpro.config
index 0a2c9b9e0db09f4f9861ba353b84a534820aba38..01b755a955c5aee521a6cf43b00847cfbc8d0cd3 100644
--- a/conf/hicpro.config
+++ b/conf/hicpro.config
@@ -38,5 +38,6 @@ params {
 
        saveReference = false
        saveAlignedIntermediates = false
+       saveInteractionBAM = false
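+       // assumption from the parameter name: when true, interaction pairs are also exported as BAM files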
 }
 
diff --git a/conf/igenomes.config b/conf/igenomes.config
index 92ad32389c6646cae0feea95e5e0a3bceeba909e..1ba2588593f4e1940dc0bf3a3380f0114a71684e 100644
--- a/conf/igenomes.config
+++ b/conf/igenomes.config
@@ -11,92 +11,152 @@ params {
   // illumina iGenomes reference file paths
   genomes {
     'GRCh37' {
-      fasta   = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2  = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/"
+    }
+    'GRCh38' {
+      fasta       = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/"
     }
     'GRCm38' {
-      fasta   = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/"
     }
     'TAIR10' {
-      fasta   = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/"
     }
     'EB2' {
-      fasta   = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/"
     }
     'UMD3.1' {
-      fasta   = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/"
     }
     'WBcel235' {
-      fasta   = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/"
     }
     'CanFam3.1' {
-      fasta   = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/"
     }
     'GRCz10' {
-      fasta   = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/"
     }
     'BDGP6' {
-      fasta   = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/"
     }
     'EquCab2' {
-      fasta   = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/"
     }
     'EB1' {
-      fasta   = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/"
     }
     'Galgal4' {
-      fasta   = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/"
     }
     'Gm01' {
-      fasta   = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/"
     }
     'Mmul_1' {
-      fasta   = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/"
     }
     'IRGSP-1.0' {
-      fasta   = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/"
     }
     'CHIMP2.1.4' {
-      fasta   = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/"
     }
     'Rnor_6.0' {
-      fasta   = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/"
     }
     'R64-1-1' {
-      fasta   = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/"
     }
     'EF2' {
-      fasta   = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/"
     }
     'Sbi1' {
-      fasta   = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/"
     }
     'Sscrofa10.2' {
-      fasta   = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/"
     }
     'AGPv3' {
-      fasta   = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa"
-      bowtie2 = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/genome"
+      fasta       = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/"
+    }
+    'hg38' {
+      fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/"
+    }
+    'hg19' {
+      fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/"
+    }
+    'mm10' {
+      fasta       = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/"
+    }
+    'bosTau8' {
+      fasta       = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/"
+    }
+    'ce10' {
+      fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/"
+    }
+    'canFam3' {
+      fasta       = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/"
+    }
+    'danRer10' {
+      fasta       = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/"
+    }
+    'dm6' {
+      fasta       = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/"
+    }
+    'equCab2' {
+      fasta       = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/"
+    }
+    'galGal4' {
+      fasta       = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/"
+    }
+    'panTro4' {
+      fasta       = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/"
+    }
+    'rn6' {
+      fasta       = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/"
+    }
+    'sacCer3' {
+      fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/"
+    }
+    'susScr3' {
+      fasta       = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa"
+      bowtie2     = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/"
     }
   }
 }
diff --git a/conf/multiqc_config.yaml b/conf/multiqc_config.yaml
deleted file mode 100644
index f2a738c43be4dae15db5075017559607c66c0542..0000000000000000000000000000000000000000
--- a/conf/multiqc_config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-report_comment: >
-    This report has been generated by the <a href="https://github.com/nf-core/hic" target="_blank">nf-core/hic</a>
-    analysis pipeline. For information about how to interpret these results, please see the
-    <a href="https://github.com/nf-core/hic" target="_blank">documentation</a>.
-report_section_order:
-    nf-core/hic-software-versions:
-        order: -1000
diff --git a/conf/test.config b/conf/test.config
index 592e3a40d8bce4cf22b5fe1ad9014ded48d439ce..39a2bba88d6da893f0f3ba97f397e77488556873 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -4,12 +4,12 @@
  * -------------------------------------------------
  * Defines bundled input files and everything required
  * to run a fast and simple test. Use as follows:
- *   nextflow run nf-core/hic -profile test
+ *   nextflow run nf-core/hic -profile test,<docker/singularity>
  */
 
 params {
 
-  config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
+  config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
   config_profile_description = 'Minimal test dataset to check pipeline function'
 
-  // Limit resources so that this can run on Travis
+  // Limit resources so that this can run on CI
@@ -26,8 +26,17 @@ params {
   fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
   restriction_site = 'A^AGCTT'
   ligation_site = 'AAGCTAGCTT'
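+  // A^AGCTT / AAGCTAGCTT correspond to a HindIII digestion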
-  min_mapq = 0
+
+  min_mapq = 2
+  rm_dup = true
+  rm_singleton = true
+  rm_multi = true
 
+  min_restriction_fragment_size = 100
+  max_restriction_fragment_size = 100000
+  min_insert_size = 100
+  max_insert_size = 600
+
   // Options
   skip_cool = true
 }
diff --git a/docs/README.md b/docs/README.md
index d7dbdac40b9452baa3d7d7747d264077276fb679..e160867d029e09c793168dd764f8a0ea01dcbd59 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2,11 +2,11 @@
 
 The nf-core/hic documentation is split into the following files:
 
-1. [Installation](installation.md)
+1. [Installation](https://nf-co.re/usage/installation)
 2. Pipeline configuration
-    * [Local installation](configuration/local.md)
-    * [Adding your own system](configuration/adding_your_own.md)
-    * [Reference genomes](configuration/reference_genomes.md)
+    * [Local installation](https://nf-co.re/usage/local_installation)
+    * [Adding your own system config](https://nf-co.re/usage/adding_own_config)
+    * [Reference genomes](https://nf-co.re/usage/reference_genomes)
 3. [Running the pipeline](usage.md)
 4. [Output and how to interpret the results](output.md)
-5. [Troubleshooting](troubleshooting.md)
+5. [Troubleshooting](https://nf-co.re/usage/troubleshooting)
diff --git a/docs/configuration/adding_your_own.md b/docs/configuration/adding_your_own.md
deleted file mode 100644
index bf7f80811e08de09bfdca2a94ed1187d1906796f..0000000000000000000000000000000000000000
--- a/docs/configuration/adding_your_own.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# nf-core/hic: Configuration for other clusters
-
-It is entirely possible to run this pipeline on other clusters, though you will need to set up your own config file so that the pipeline knows how to work with your cluster.
-
-> If you think that there are other people using the pipeline who would benefit from your configuration (eg. other common cluster setups), please let us know. We can add a new configuration and profile which can used by specifying `-profile <name>` when running the pipeline. The config file will then be hosted at `nf-core/configs` and will be pulled automatically before the pipeline is executed.
-
-If you are the only person to be running this pipeline, you can create your config file as `~/.nextflow/config` and it will be applied every time you run Nextflow. Alternatively, save the file anywhere and reference it when running the pipeline with `-c path/to/config` (see the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more).
-
-A basic configuration comes with the pipeline, which loads the [`conf/base.config`](../../conf/base.config) by default. This means that you only need to configure the specifics for your system and overwrite any defaults that you want to change.
-
-## Cluster Environment
-By default, pipeline uses the `local` Nextflow executor - in other words, all jobs are run in the login session. If you're using a simple server, this may be fine. If you're using a compute cluster, this is bad as all jobs will run on the head node.
-
-To specify your cluster environment, add the following line to your config file:
-
-```nextflow
-process.executor = 'YOUR_SYSTEM_TYPE'
-```
-
-Many different cluster types are supported by Nextflow. For more information, please see the [Nextflow documentation](https://www.nextflow.io/docs/latest/executor.html).
-
-Note that you may need to specify cluster options, such as a project or queue. To do so, use the `clusterOptions` config option:
-
-```nextflow
-process {
-  executor = 'SLURM'
-  clusterOptions = '-A myproject'
-}
-```
-
-
-## Software Requirements
-To run the pipeline, several software packages are required. How you satisfy these requirements is essentially up to you and depends on your system. If possible, we _highly_ recommend using either Docker or Singularity.
-
-Please see the [`installation documentation`](../installation.md) for how to run using the below as a one-off. These instructions are about configuring a config file for repeated use.
-
-### Docker
-Docker is a great way to run nf-core/hic, as it manages all software installations and allows the pipeline to be run in an identical software environment across a range of systems.
-
-Nextflow has [excellent integration](https://www.nextflow.io/docs/latest/docker.html) with Docker, and beyond installing the two tools, not much else is required - nextflow will automatically fetch the [nfcore/hic](https://hub.docker.com/r/nfcore/hic/) image that we have created and is hosted at dockerhub at run time.
-
-To add docker support to your own config file, add the following:
-
-```nextflow
-docker.enabled = true
-process.container = "nfcore/hic"
-```
-
-Note that the dockerhub organisation name annoyingly can't have a hyphen, so is `nfcore` and not `nf-core`.
-
-
-### Singularity image
-Many HPC environments are not able to run Docker due to security issues.
-[Singularity](http://singularity.lbl.gov/) is a tool designed to run on such HPC systems which is very similar to Docker.
-
-To specify singularity usage in your pipeline config file, add the following:
-
-```nextflow
-singularity.enabled = true
-process.container = "shub://nf-core/hic"
-```
-
-If you intend to run the pipeline offline, nextflow will not be able to automatically download the singularity image for you.
-Instead, you'll have to do this yourself manually first, transfer the image file and then point to that.
-
-First, pull the image file where you have an internet connection:
-
-```bash
-singularity pull --name nf-core-hic.simg shub://nf-core/hic
-```
-
-Then transfer this file and point the config file to the image:
-
-```nextflow
-singularity.enabled = true
-process.container = "/path/to/nf-core-hic.simg"
-```
-
-
-### Conda
-If you're not able to use Docker or Singularity, you can instead use conda to manage the software requirements.
-To use conda in your own config file, add the following:
-
-```nextflow
-process.conda = "$baseDir/environment.yml"
-```
diff --git a/docs/configuration/local.md b/docs/configuration/local.md
deleted file mode 100644
index d4530fa9007866b32cf2dda77ed780c4fe19f1e8..0000000000000000000000000000000000000000
--- a/docs/configuration/local.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# nf-core/hic: Local Configuration
-
-If running the pipeline in a local environment, we highly recommend using either Docker or Singularity.
-
-## Docker
-Docker is a great way to run `nf-core/hic`, as it manages all software installations and allows the pipeline to be run in an identical software environment across a range of systems.
-
-Nextflow has [excellent integration](https://www.nextflow.io/docs/latest/docker.html) with Docker, and beyond installing the two tools, not much else is required. The `nf-core/hic` profile comes with a configuration profile for docker, making it very easy to use. This also comes with the required presets to use the AWS iGenomes resource, meaning that if using common reference genomes you just specify the reference ID and it will be automatically downloaded from AWS S3.
-
-First, install docker on your system: [Docker Installation Instructions](https://docs.docker.com/engine/installation/)
-
-Then, simply run the analysis pipeline:
-
-```bash
-nextflow run nf-core/hic -profile docker --genome '<genome ID>'
-```
-
-Nextflow will recognise `nf-core/hic` and download the pipeline from GitHub. The `-profile docker` configuration lists the [nf-core/hic](https://hub.docker.com/r/nfcore/hic/) image that we have created and is hosted at dockerhub, and this is downloaded.
-
-For more information about how to work with reference genomes, see [`docs/configuration/reference_genomes.md`](reference_genomes.md).
-
-### Pipeline versions
-The public docker images are tagged with the same version numbers as the code, which you can use to ensure reproducibility. When running the pipeline, specify the pipeline version with `-r`, for example `-r 1.0`. This uses pipeline code and docker image from this tagged version.
-
-
-## Singularity image
-Many HPC environments are not able to run Docker due to security issues. [Singularity](http://singularity.lbl.gov/) is a tool designed to run on such HPC systems which is very similar to Docker. Even better, it can use create images directly from dockerhub.
-
-To use the singularity image for a single run, use `-with-singularity`. This will download the docker container from dockerhub and create a singularity image for you dynamically.
-
-If you intend to run the pipeline offline, nextflow will not be able to automatically download the singularity image for you. Instead, you'll have to do this yourself manually first, transfer the image file and then point to that.
-
-First, pull the image file where you have an internet connection:
-
-> NB: The "tag" at the end of this command corresponds to the pipeline version.
-> Here, we're pulling the docker image for version 1.0 of the nf-core/hic pipeline
-> Make sure that this tag corresponds to the version of the pipeline that you're using
-
-```bash
-singularity pull --name nf-core-hic-1.0.img docker://nf-core/hic:1.0
-```
-
-Then transfer this file and run the pipeline with this path:
-
-```bash
-nextflow run /path/to/nf-core-hic -with-singularity /path/to/nf-core-hic-1.0.img
-```
diff --git a/docs/configuration/reference_genomes.md b/docs/configuration/reference_genomes.md
deleted file mode 100644
index c52faf821ad7e676ed56dc04c92aae165ea573c7..0000000000000000000000000000000000000000
--- a/docs/configuration/reference_genomes.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# nf-core/hic: Reference Genomes Configuration
-
-The nf-core/hic pipeline needs a reference genome for alignment and annotation.
-
-These paths can be supplied on the command line at run time (see the [usage docs](../usage.md)),
-but for convenience it's often better to save these paths in a nextflow config file.
-See below for instructions on how to do this.
-Read [Adding your own system](adding_your_own.md) to find out how to set up custom config files.
-
-## Adding paths to a config file
-Specifying long paths every time you run the pipeline is a pain.
-To make this easier, the pipeline comes configured to understand reference genome keywords which correspond to preconfigured paths, meaning that you can just specify `--genome ID` when running the pipeline.
-
-Note that this genome key can also be specified in a config file if you always use the same genome.
-
-To use this system, add paths to your config file using the following template:
-
-```nextflow
-params {
-  genomes {
-    'YOUR-ID' {
-      fasta  = '<PATH TO FASTA FILE>/genome.fa'
-    }
-    'OTHER-GENOME' {
-      // [..]
-    }
-  }
-  // Optional - default genome. Ignored if --genome 'OTHER-GENOME' specified on command line
-  genome = 'YOUR-ID'
-}
-```
-
-You can add as many genomes as you like as long as they have unique IDs.
-
-## illumina iGenomes
-To make the use of reference genomes easier, illumina has developed a centralised resource called [iGenomes](https://support.illumina.com/sequencing/sequencing_software/igenome.html).
-Multiple reference index types are held together with consistent structure for multiple genomes.
-
-We have put a copy of iGenomes up onto AWS S3 hosting and this pipeline is configured to use this by default.
-The hosting fees for AWS iGenomes are currently kindly funded by a grant from Amazon.
-The pipeline will automatically download the required reference files when you run the pipeline.
-For more information about the AWS iGenomes, see [AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/)
-
-Downloading the files takes time and bandwidth, so we recommend making a local copy of the iGenomes resource.
-Once downloaded, you can customise the variable `params.igenomes_base` in your custom configuration file to point to the reference location.
-For example:
-
-```nextflow
-params.igenomes_base = '/path/to/data/igenomes/'
-```
diff --git a/docs/images/nf-core-hic_logo.png b/docs/images/nf-core-hic_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5fead372861ff430d7f1428e15dad9b045523e8
Binary files /dev/null and b/docs/images/nf-core-hic_logo.png differ
diff --git a/docs/installation.md b/docs/installation.md
deleted file mode 100644
index 9ac66d585871d374c90df1f14b2c192f2d24b7a8..0000000000000000000000000000000000000000
--- a/docs/installation.md
+++ /dev/null
@@ -1,110 +0,0 @@
-# nf-core/hic: Installation
-
-To start using the nf-core/hic pipeline, follow the steps below:
-
-1. [Install Nextflow](#1-install-nextflow)
-2. [Install the pipeline](#2-install-the-pipeline)
-    * [Automatic](#21-automatic)
-    * [Offline](#22-offline)
-    * [Development](#23-development)
-3. [Pipeline configuration](#3-pipeline-configuration)
-    * [Software deps: Docker and Singularity](#31-software-deps-docker-and-singularity)
-    * [Software deps: Bioconda](#32-software-deps-bioconda)
-    * [Configuration profiles](#33-configuration-profiles)
-4. [Reference genomes](#4-reference-genomes)
-
-## 1) Install NextFlow
-Nextflow runs on most POSIX systems (Linux, Mac OSX etc). It can be installed by running the following commands:
-
-```bash
-# Make sure that Java v8+ is installed:
-java -version
-
-# Install Nextflow
-curl -fsSL get.nextflow.io | bash
-
-# Add Nextflow binary to your PATH:
-mv nextflow ~/bin/
-# OR system-wide installation:
-# sudo mv nextflow /usr/local/bin
-```
-
-See [nextflow.io](https://www.nextflow.io/) for further instructions on how to install and configure Nextflow.
-
-## 2) Install the pipeline
-
-#### 2.1) Automatic
-This pipeline itself needs no installation - NextFlow will automatically fetch it from GitHub if `nf-core/hic` is specified as the pipeline name.
-
-#### 2.2) Offline
-The above method requires an internet connection so that Nextflow can download the pipeline files. If you're running on a system that has no internet connection, you'll need to download and transfer the pipeline files manually:
-
-```bash
-wget https://github.com/nf-core/hic/archive/master.zip
-mkdir -p ~/my-pipelines/nf-core/
-unzip master.zip -d ~/my-pipelines/nf-core/
-cd ~/my_data/
-nextflow run ~/my-pipelines/nf-core/hic-master
-```
-
-To stop nextflow from looking for updates online, you can tell it to run in offline mode by specifying the following environment variable in your ~/.bashrc file:
-
-```bash
-export NXF_OFFLINE='TRUE'
-```
-
-#### 2.3) Development
-
-If you would like to make changes to the pipeline, it's best to make a fork on GitHub and then clone the files. Once cloned you can run the pipeline directly as above.
-
-
-## 3) Pipeline configuration
-By default, the pipeline loads a basic server configuration [`conf/base.config`](../conf/base.config)
-This uses a number of sensible defaults for process requirements and is suitable for running
-on a simple (if powerful!) local server.
-
-Be warned of two important points about this default configuration:
-
-1. The default profile uses the `local` executor
-    * All jobs are run in the login session. If you're using a simple server, this may be fine. If you're using a compute cluster, this is bad as all jobs will run on the head node.
-    * See the [nextflow docs](https://www.nextflow.io/docs/latest/executor.html) for information about running with other hardware backends. Most job scheduler systems are natively supported.
-2. Nextflow will expect all software to be installed and available on the `PATH`
-    * It's expected to use an additional config profile for docker, singularity or conda support. See below.
-
-#### 3.1) Software deps: Docker
-First, install docker on your system: [Docker Installation Instructions](https://docs.docker.com/engine/installation/)
-
-Then, running the pipeline with the option `-profile docker` tells Nextflow to enable Docker for this run. An image containing all of the software requirements will be automatically fetched and used from [dockerhub](https://hub.docker.com/r/nfcore/hic).
-
-#### 3.1) Software deps: Singularity
-If you're not able to use Docker then [Singularity](http://singularity.lbl.gov/) is a great alternative.
-The process is very similar: running the pipeline with the option `-profile singularity` tells Nextflow to enable singularity for this run. An image containing all of the software requirements will be automatically fetched and used from singularity hub.
-
-If running offline with Singularity, you'll need to download and transfer the Singularity image first:
-
-```bash
-singularity pull --name nf-core-hic.simg shub://nf-core/hic
-```
-
-Once transferred, use `-with-singularity` and specify the path to the image file:
-
-```bash
-nextflow run /path/to/nf-core-hic -with-singularity nf-core-hic.simg
-```
-
-Remember to pull updated versions of the singularity image if you update the pipeline.
-
-
-#### 3.2) Software deps: conda
-If you're not able to use Docker _or_ Singularity, you can instead use conda to manage the software requirements.
-This is slower and less reproducible than the above, but is still better than having to install all requirements yourself!
-The pipeline ships with a conda environment file and nextflow has built-in support for this.
-To use it first ensure that you have conda installed (we recommend [miniconda](https://conda.io/miniconda.html)), then follow the same pattern as above and use the flag `-profile conda`
-
-#### 3.3) Configuration profiles
-
-See [`docs/configuration/adding_your_own.md`](configuration/adding_your_own.md)
-
-## 4) Reference genomes
-
-See [`docs/configuration/reference_genomes.md`](configuration/reference_genomes.md)
diff --git a/docs/output.md b/docs/output.md
index 53c9c0c7c20b11e85acd758e4f7b157116ef2378..a83d0dae9b5a742b799f055163dd7dde2da77712 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -1,8 +1,11 @@
 # nf-core/hic: Output
 
-This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline.
+This document describes the output produced by the pipeline. Most of the plots
+are taken from the MultiQC report, which summarises results at the end of the
+pipeline.
 
 ## Pipeline overview
+
 The pipeline is built using [Nextflow](https://www.nextflow.io/)
 and processes data using the following steps:
 
@@ -10,27 +13,38 @@ and processes data using the following steps:
 * [Valid pairs detection](#valid-pairs-detection)
 * [Duplicates removal](#duplicates-removal)
 * [Contact maps](#contact-maps)
-* [MultiQC](#multiqc) - aggregate report and quality controls, describing results of the whole pipeline
-* [Export](#exprot) - additionnal export for compatibility with downstream analysis tool and visualization
+* [MultiQC](#multiqc) - aggregate report and quality controls, describing
+results of the whole pipeline
+* [Export](#export) - additional exports for compatibility with downstream
+analysis and visualization tools
 
-The current version is mainly based on the [HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
-For details about the workflow, see [Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
+The current version is mainly based on the
+[HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
+For details about the workflow, see
+[Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
 
 ## Reads alignment
 
-Using Hi-C data, each reads mate has to be independantly aligned on the reference genome.
-The current workflow implements a two steps mapping strategy. First, the reads are aligned using an end-to-end aligner.
-Second, reads spanning the ligation junction are trimmmed from their 3' end, and aligned back on the genome.
-Aligned reads for both fragment mates are then paired in a single paired-end BAM file.
-Singletons are discarded, and multi-hits are filtered according to the configuration parameters (`--rm-multi`).
-Note that if the `--dnase` mode is activated, HiC-Pro will skip the second mapping step.
+With Hi-C data, each read mate has to be independently aligned to the
+reference genome.
+The current workflow implements a two-step mapping strategy. First, the reads
+are aligned using an end-to-end aligner.
+Second, reads spanning the ligation junction are trimmed from their 3' end,
+and aligned back to the genome.
+Aligned reads for both fragment mates are then paired in a single paired-end
+BAM file.
+Singletons are discarded, and multi-hits are filtered according to the
+configuration parameters (`--rm_multi`).
+Note that if the `--dnase` mode is activated, HiC-Pro will skip the second
+mapping step.
 
 **Output directory: `results/mapping`**
 
 * `*bwt2pairs.bam` - final BAM file with aligned paired data
 * `*.pairstat` - mapping statistics
 
-if `--saveAlignedIntermediates` is specified, additional mapping file results are available ;
+If `--saveAlignedIntermediates` is specified, additional mapping files
+are available:
 
 * `*.bam` - Aligned reads (R1 and R2) from end-to-end alignment
 * `*_unmap.fastq` - Unmapped reads after end-to-end alignment
@@ -39,68 +53,117 @@ if `--saveAlignedIntermediates` is specified, additional mapping file results ar
 * `*bwt2merged.bam` - merged BAM file after the two-steps alignment
 * `*.mapstat` - mapping statistics per read mate
 
-Usually, a high fraction of reads is expected to be aligned on the genome (80-90%). Among them, we usually observed a few percent (around 10%) of step 2 aligned reads. Those reads are chimeric fragments for which we detect a ligation junction. An abnormal level of chimeric reads can reflect a ligation issue during the library preparation.
-The fraction of singleton or multi-hits depends on the genome complexity and the fraction of unmapped reads. The fraction of singleton is usually close to the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the same pair were unmapped.
+Usually, a high fraction of reads is expected to be aligned to the genome
+(80-90%). Among them, we usually observe a few percent (around 10%) of reads
+aligned in step 2. Those reads are chimeric fragments for which we detect a
+ligation junction. An abnormal level of chimeric reads can reflect a ligation
+issue during library preparation.
+The fraction of singletons or multi-hits depends on the genome complexity and
+the fraction of unmapped reads. The fraction of singletons is usually close to
+the sum of unmapped R1 and R2 reads, as it is unlikely that both mates of the
+same pair were unmapped.
 
 ## Valid pairs detection
 
-Each aligned reads can be assigned to one restriction fragment according to the reference genome and the digestion protocol.
+Each aligned read can be assigned to one restriction fragment according to the
+reference genome and the digestion protocol.
 
 Invalid pairs are classified as follows:
-* Dangling end, i.e. unligated fragments (both reads mapped on the same restriction fragment)
-* Self circles, i.e. fragments ligated on themselves (both reads mapped on the same restriction fragment in inverted orientation)
-* Religation, i.e. ligation of juxtaposed fragments
-* Filtered pairs, i.e. any pairs that do not match the filtering criteria on inserts size, restriction fragments size
-* Dumped pairs, i.e. any pairs for which we were not able to reconstruct the ligation product.
-
-Only valid pairs involving two different restriction fragments are used to build the contact maps.
-Duplicated valid pairs associated to PCR artefacts are discarded (see `--rm_dup`.
 
-In case of Hi-C protocols that do not require a restriction enzyme such as DNase Hi-C or micro Hi-C, the assignment to a restriction is not possible (see `--dnase`).
-Short range interactions that are likely to be spurious ligation products can thus be discarded using the `--min_cis_dist` parameter.
+* Dangling end, i.e. unligated fragments (both reads mapped on the same
+restriction fragment)
+* Self circles, i.e. fragments ligated on themselves (both reads mapped on the
+same restriction fragment in inverted orientation)
+* Religation, i.e. ligation of juxtaposed fragments
+* Filtered pairs, i.e. any pairs that do not match the filtering criteria on
+insert size or restriction fragment size
+* Dumped pairs, i.e. any pairs for which we were not able to reconstruct the
+ligation product.
+
+Only valid pairs involving two different restriction fragments are used to
+build the contact maps.
+Duplicated valid pairs associated to PCR artefacts are discarded
+(see `--rm_dup`).
+
+In the case of Hi-C protocols that do not require a restriction enzyme, such as
+DNase Hi-C or micro Hi-C, the assignment to a restriction fragment is not
+possible (see `--dnase`).
+Short-range interactions that are likely to be spurious ligation products
+can thus be discarded using the `--min_cis_dist` parameter.
 
 * `*.validPairs` - List of valid ligation products
+* `*.DEpairs` - List of dangling-end products
+* `*.SCPairs` - List of self-circle products
+* `*.REPairs` - List of religation products
+* `*.FiltPairs` - List of filtered pairs
 * `*RSstat` - Statistics of the number of read pairs falling in each category
 
 The validPairs are stored using a simple tab-delimited text format:
 
 ```bash
-read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 / strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1 / mapping qual R2 [/ allele_specific_tag]
+read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 /
+strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1
+/ mapping qual R2 [/ allele_specific_tag]
 ```
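+
+For illustration, a made-up record following this format might look like
+this (fields are tab separated in the real files; all values below are
+invented):
+
+```bash
+NB501234:101:HYHNNBGX2:1:11101:10230:1061    chr1    15262    +    chr1    23865    -    478    HIC_chr1_2    HIC_chr1_4    42    42
+```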
 
-The ligation efficiency can be assessed using the filtering of valid and invalid pairs. As the ligation is a random process, 25% of each valid ligation class is expected. In the same way, a high level of dangling-end or self-circle read pairs is associated with a low quality experiment, and reveals a problem during the digestion, fill-in or ligation steps.
+The ligation efficiency can be assessed from the filtering of valid and
+invalid pairs. As ligation is a random process, 25% of each valid ligation
+class is expected. In the same way, a high level of dangling-end or self-circle
+read pairs is associated with a low-quality experiment, and reveals a problem
+during the digestion, fill-in or ligation steps.
 
-In the context of Hi-C protocol without restriction enzyme, this analysis step is skipped. The aligned pairs are therefore directly used to generate the contact maps. A filter of the short range contact (typically <1kb) is recommanded as this pairs are likely to be self ligation products.
+In the context of Hi-C protocols without a restriction enzyme, this analysis
+step is skipped. The aligned pairs are therefore directly used to generate the
+contact maps. Filtering out short-range contacts (typically <1kb) is
+recommended, as these pairs are likely to be self-ligation products.
 
 ## Duplicates removal
 
 Note that validPairs files are generated per read chunk.
-These files are then merged in the allValidPairs file, and duplicates are removed if the `--rm_dup` parameter is used.
+These files are then merged into the allValidPairs file, and duplicates are
+removed if the `--rm_dup` parameter is used.
 
 * `*allValidPairs` - combined valid pairs from all read chunks
 * `*mergestat` - statistics about duplicates removal and valid pairs information
 
-Additional quality controls such as fragment size distribution can be extracted from the list of valid interaction products.
-We usually expect to see a distribution centered around 300 pb which correspond to the paired-end insert size commonly used.
-The fraction of dplicates is also presented. A high level of duplication indicates a poor molecular complexity and a potential PCR bias.
-Finaly, an important metric is to look at the fraction of intra and inter-chromosomal interactions, as well as long range (>20kb) versus short range (<20kb) intra-chromosomal interactions.
+Additional quality controls, such as the fragment size distribution, can be
+extracted from the list of valid interaction products.
+We usually expect to see a distribution centered around 300 bp, which
+corresponds to the paired-end insert size commonly used.
+The fraction of duplicates is also presented. A high level of duplication
+indicates a poor molecular complexity and a potential PCR bias.
+Finally, an important metric is the fraction of intra- and
+inter-chromosomal interactions, as well as long-range (>20kb) versus
+short-range (<20kb) intra-chromosomal interactions.
 
 ## Contact maps
 
 Intra- and inter-chromosomal contact maps are built for all specified resolutions.
-The genome is splitted into bins of equal size. Each valid interaction is associated with the genomic bins to generate the raw maps.
-In addition, Hi-C data can contain several sources of biases which has to be corrected.
-The current workflow uses the [ìced](https://github.com/hiclib/iced) and [Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286) python package which proposes a fast implementation of the original ICE normalization algorithm (Imakaev et al. 2012), making the assumption of equal visibility of each fragment.
+The genome is split into bins of equal size. Each valid interaction is
+associated with the genomic bins to generate the raw maps.
+In addition, Hi-C data can contain several sources of bias which have to be
+corrected.
+The current workflow uses the [iced](https://github.com/hiclib/iced) Python
+package ([Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286)),
+which provides a fast implementation of the original ICE normalization
+algorithm (Imakaev et al. 2012), making the assumption of equal visibility of
+each fragment.
 
 * `*.matrix` - genome-wide contact maps
 * `*_iced.matrix` - genome-wide iced contact maps
 
-The contact maps are generated for all specified resolution (see `--bin_size` argument)
+The contact maps are generated for all specified resolutions
+(see the `--bin_size` argument).
 A contact map is defined by:
+
 * A list of genomic intervals related to the specified resolution (BED format).
 * A matrix, stored as standard triplet sparse format (i.e. list format).
 
-Based on the observation that a contact map is symmetric and usually sparse, only non-zero values are stored for half of the matrix. The user can specified if the 'upper', 'lower' or 'complete' matrix has to be stored. The 'asis' option allows to store the contacts as they are observed from the valid pairs files.
+Based on the observation that a contact map is symmetric and usually sparse,
+only non-zero values are stored for half of the matrix. The user can specify
+whether the 'upper', 'lower' or 'complete' matrix should be stored. The 'asis'
+option stores the contacts as they are observed in the valid pairs
+files.
 
 ```bash
    A   B   10
@@ -109,19 +172,27 @@ Based on the observation that a contact map is symmetric and usually sparse, onl
    (...)
 ```
 
-This format is memory efficient, and is compatible with several software for downstream analysis.
+This format is memory efficient, and is compatible with several downstream
+analysis tools.
 
 ## MultiQC
 
-[MultiQC](http://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in within the report data directory.
+[MultiQC](http://multiqc.info) is a visualisation tool that generates a single
+HTML report summarising all samples in your project. Most of the pipeline QC
+results are visualised in the report and further statistics are available
+within the report data directory.
 
-The pipeline has special steps which allow the software versions used to be reported in the MultiQC output for future traceability.
+The pipeline has special steps which allow the software versions used to be
+reported in the MultiQC output for future traceability.
 
 **Output directory: `results/multiqc`**
 
 * `Project_multiqc_report.html`
-  * MultiQC report - a standalone HTML file that can be viewed in your web browser
+  * MultiQC report - a standalone HTML file that can be viewed in your
+web browser
 * `Project_multiqc_data/`
-  * Directory containing parsed statistics from the different tools used in the pipeline
+  * Directory containing parsed statistics from the different tools used
+in the pipeline
 
-For more information about how to use MultiQC reports, see [http://multiqc.info](http://multiqc.info)
+For more information about how to use MultiQC reports, see
+[http://multiqc.info](http://multiqc.info)
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
deleted file mode 100644
index e0f2d0774afa327390d3e3cb33c7c3b1e6c829fb..0000000000000000000000000000000000000000
--- a/docs/troubleshooting.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# nf-core/hic: Troubleshooting
-
-## Input files not found
-
-If only no file, only one input file , or only read one and not read two is picked up then something is wrong with your input file declaration
-
-1. The path must be enclosed in quotes (`'` or `"`)
-2. The path must have at least one `*` wildcard character. This is even if you are only running one paired end sample.
-3. When using the pipeline with paired end data, the path must use `{1,2}` or `{R1,R2}` notation to specify read pairs.
-4. If you are running Single end data make sure to specify `--singleEnd`
-
-If the pipeline can't find your files then you will get the following error
-
-```bash
-ERROR ~ Cannot find any reads matching: *{1,2}.fastq.gz
-```
-
-Note that if your sample name is "messy" then you have to be very particular with your glob specification. A file name like `L1-1-D-2h_S1_L002_R1_001.fastq.gz` can be difficult enough for a human to read. Specifying `*{1,2}*.gz` wont work give you what you want Whilst `*{R1,R2}*.gz` will.
-
-
-## Data organization
-The pipeline can't take a list of multiple input files - it takes a glob expression. If your input files are scattered in different paths then we recommend that you generate a directory with symlinked files. If running in paired end mode please make sure that your files are sensibly named so that they can be properly paired. See the previous point.
-
-## Extra resources and getting help
-If you still have an issue with running the pipeline then feel free to contact us.
-Have a look at the [pipeline website](https://github.com/nf-core/hic) to find out how.
-
-If you have problems that are related to Nextflow and not our pipeline then check out the [Nextflow gitter channel](https://gitter.im/nextflow-io/nextflow) or the [google group](https://groups.google.com/forum/#!forum/nextflow).
diff --git a/docs/usage.md b/docs/usage.md
index d166cf6cf2c345c72af52e9c56d436302d6e8e1d..cef7bf3b60752e918d11dbfb9023aed8ca2d9242 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -2,10 +2,11 @@
 
 ## Table of contents
 
-* [Introduction](#general-nextflow-info)
+* [Table of contents](#table-of-contents)
+* [Introduction](#introduction)
 * [Running the pipeline](#running-the-pipeline)
-* [Updating the pipeline](#updating-the-pipeline)
-* [Reproducibility](#reproducibility)
+  * [Updating the pipeline](#updating-the-pipeline)
+  * [Reproducibility](#reproducibility)
 * [Main arguments](#main-arguments)
   * [`-profile`](#-profile-single-dash)
     * [`awsbatch`](#awsbatch)
@@ -51,11 +52,12 @@
     * [`--splitFastq`](#--splitFastq)
     * [`--saveReference`](#--saveReference)
     * [`--saveAlignedIntermediates`](#--saveAlignedIntermediates)
+    * [`--saveInteractionBAM`](#--saveInteractionBAM)
 * [Skip options](#skip-options)
-  * [--skip_maps](#--skip_maps)
-  * [--skip_ice](#--skip_ice)
-  * [--skip_cool](#--skip_cool)
-  * [--skip_multiqc](#--skip_multiqc)  
+  * [--skipMaps](#--skipMaps)
+  * [--skipIce](#--skipIce)
+  * [--skipCool](#--skipCool)
+  * [--skipMultiQC](#--skipMultiQC)
 * [Job resources](#job-resources)
 * [Automatic resubmission](#automatic-resubmission)
 * [Custom resource requests](#custom-resource-requests)
@@ -66,34 +68,46 @@
 * [Other command line parameters](#other-command-line-parameters)
   * [`--outdir`](#--outdir)
   * [`--email`](#--email)
+  * [`--email_on_fail`](#--email_on_fail)
+  * [`--max_multiqc_email_size`](#--max_multiqc_email_size)
   * [`-name`](#-name-single-dash)
   * [`-resume`](#-resume-single-dash)
   * [`-c`](#-c-single-dash)
   * [`--custom_config_version`](#--custom_config_version)
+  * [`--custom_config_base`](#--custom_config_base)
   * [`--max_memory`](#--max_memory)
   * [`--max_time`](#--max_time)
   * [`--max_cpus`](#--max_cpus)
   * [`--plaintext_email`](#--plaintext_email)
+  * [`--monochrome_logs`](#--monochrome_logs)
   * [`--multiqc_config`](#--multiqc_config)
 
+## Introduction
 
-## General Nextflow info
-Nextflow handles job submissions on SLURM or other environments, and supervises running the jobs. Thus the Nextflow process must run until the pipeline is finished. We recommend that you put the process running in the background through `screen` / `tmux` or similar tool. Alternatively you can run nextflow within a cluster job submitted your job scheduler.
+Nextflow handles job submissions on SLURM or other environments, and supervises
+the running jobs. Thus the Nextflow process must run until the pipeline is
+finished. We recommend that you run the process in the background
+through `screen` / `tmux` or a similar tool. Alternatively you can run Nextflow
+within a cluster job submitted to your job scheduler.
 
-It is recommended to limit the Nextflow Java virtual machines memory. We recommend adding the following line to your environment (typically in `~/.bashrc` or `~./bash_profile`):
+It is recommended to limit the Nextflow Java virtual machine's memory.
+We recommend adding the following line to your environment (typically
+in `~/.bashrc` or `~/.bash_profile`):
 
 ```bash
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
 
 ## Running the pipeline
+
 The typical command for running the pipeline is as follows:
 
 ```bash
-nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' -genome GRCh37 -profile docker
+nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' --genome GRCh37 -profile docker
 ```
 
-This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.
+This will launch the pipeline with the `docker` configuration profile.
+See below for more information about profiles.
 
 Note that the pipeline will create the following files in your working directory:
 
@@ -105,43 +119,84 @@ results         # Finished results (configurable, see below)
 ```
 
 ### Updating the pipeline
-When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
+
+When you run the above command, Nextflow automatically pulls the pipeline code
+from GitHub and stores it as a cached version. When running the pipeline after
+this, it will always use the cached version if available - even if the pipeline
+has been updated since. To make sure that you're running the latest version of
+the pipeline, make sure that you regularly update the cached version of the
+pipeline:
 
 ```bash
 nextflow pull nf-core/hic
 ```
 
 ### Reproducibility
-It's a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
 
-First, go to the [nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`.
+It's a good idea to specify a pipeline version when running the pipeline on
+your data. This ensures that a specific version of the pipeline code and
+software are used when you run your pipeline. If you keep using the same tag,
+you'll be running the same version of the pipeline, even if there have been
+changes to the code since.
 
-This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future.
+First, go to the
+[nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find
+the latest version number - numeric only (eg. `1.3.1`).
+Then specify this when running the pipeline with `-r` (one hyphen),
+eg. `-r 1.3.1`.
+
+This version number will be logged in reports when you run the pipeline, so
+that you'll know what you used when you look back in the future.
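+
+For example, combining `-r` with the typical command shown earlier:
+
+```bash
+nextflow run nf-core/hic -r 1.3.1 --reads '*_R{1,2}.fastq.gz' --genome GRCh37 -profile docker
+```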
 
 ## Main arguments
 
 ### `-profile`
-Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. Note that multiple profiles can be loaded, for example: `-profile docker` - the order of arguments is important!
 
-If `-profile` is not specified at all the pipeline will be run locally and expects all software to be installed and available on the `PATH`.
+Use this parameter to choose a configuration profile. Profiles can give
+configuration presets for different compute environments.
+
+Several generic profiles are bundled with the pipeline which instruct
+the pipeline to use software packaged using different methods
+(Docker, Singularity, Conda) - see below.
+
+> We highly recommend the use of Docker or Singularity containers for full
+pipeline reproducibility; however, when this is not possible, Conda is also supported.
+
+The pipeline also dynamically loads configurations from
+[https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs,
+making multiple config profiles for various institutional clusters available at run time.
+For more information and to see if your system is available in these configs please see
+the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
+
+Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order
+of arguments is important!
+They are loaded in sequence, so later profiles can overwrite earlier profiles.
+
+If `-profile` is not specified, the pipeline will run locally and expect all software to be
+installed and available on the `PATH`. This is _not_ recommended.
 
-* `awsbatch`
-  * A generic configuration profile to be used with AWS Batch.
-* `conda`
-  * A generic configuration profile to be used with [conda](https://conda.io/docs/)
-  * Pulls most software from [Bioconda](https://bioconda.github.io/)
 * `docker`
   * A generic configuration profile to be used with [Docker](http://docker.com/)
   * Pulls software from dockerhub: [`nfcore/hic`](http://hub.docker.com/r/nfcore/hic/)
 * `singularity`
   * A generic configuration profile to be used with [Singularity](http://singularity.lbl.gov/)
   * Pulls software from DockerHub: [`nfcore/hic`](http://hub.docker.com/r/nfcore/hic/)
+* `conda`
+  * Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker or Singularity.
+  * A generic configuration profile to be used with [Conda](https://conda.io/docs/)
+  * Pulls most software from [Bioconda](https://bioconda.github.io/)
 * `test`
   * A profile with a complete configuration for automated testing
   * Includes links to test data so needs no other parameters
 
 ### `--reads`
+
 Use this to specify the location of your input FastQ files. For example:
 
 ```bash
@@ -152,18 +207,35 @@ Please note the following requirements:
 
 1. The path must be enclosed in quotes
 2. The path must have at least one `*` wildcard character
-3. When using the pipeline with paired end data, the path must use `{1,2}` notation to specify read pairs.
+3. When using the pipeline with paired end data, the path must use `{1,2}`
+notation to specify read pairs.
 
 If left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`
 
-## Reference genomes and annotation files
+### `--single_end`
+
+By default, the pipeline expects paired-end data. If you have single-end data, you need to specify `--single_end` on the command line when you launch the pipeline. A normal glob pattern, enclosed in quotation marks, can then be used for `--reads`. For example:
+
+```bash
+--single_end --reads '*.fastq'
+```
+
+It is not possible to run a mixture of single-end and paired-end files in one run.
+
+## Reference genomes
 
 The pipeline config files come bundled with paths to the illumina iGenomes reference index files. If running with docker or AWS, the configuration is set up to use the [AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) resource.
 
 ### `--genome` (using iGenomes)
+
 There are 31 different species supported in the iGenomes references. To run the pipeline, you must specify which to use with the `--genome` flag.
 
-You can find the keys to specify the genomes in the [iGenomes config file](../conf/igenomes.config). Common genomes that are supported are:
+You can find the keys to specify the genomes in the
+[iGenomes config file](../conf/igenomes.config).
+Common genomes that are supported are:
 
 * Human
   * `--genome GRCh37`
@@ -176,11 +248,13 @@ You can find the keys to specify the genomes in the [iGenomes config file](../co
 
 > There are numerous others - check the config file for more.
 
-Note that you can use the same configuration setup to save sets of reference files for your own use, even if they are not part of the iGenomes resource. See the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for instructions on where to save such a file.
+Note that you can use the same configuration setup to save sets of reference
+files for your own use, even if they are not part of the iGenomes resource.
+See the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html)
+for instructions on where to save such a file.
 
 The syntax for this reference configuration is as follows:
 
-
 ```nextflow
 params {
   genomes {
@@ -194,18 +268,26 @@ params {
 ```
 
 ### `--fasta`
-If you prefer, you can specify the full path to your reference genome when you run the pipeline:
+
+If you prefer, you can specify the full path to your reference genome when you
+run the pipeline:
 
 ```bash
 --fasta '[path to Fasta reference]'
 ```
 
 ### `--igenomesIgnore`
-Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`.
+
+Do not load `igenomes.config` when running the pipeline. You may choose this
+option if you observe clashes between custom parameters and those supplied
+in `igenomes.config`.
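+
+Like the other flags described here, it is passed directly on the command line:
+
+```bash
+--igenomesIgnore
+```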
 
 ### `--bwt2_index`
 
-The bowtie2 indexes are required to run the Hi-C pipeline. If the `--bwt2_index` is not specified, the pipeline will either use the igenome bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly (see `--fasta` option)
+The bowtie2 indexes are required to run the Hi-C pipeline. If
+`--bwt2_index` is not specified, the pipeline will either use the iGenomes
+bowtie2 indexes (see the `--genome` option) or build the indexes on the fly
+(see the `--fasta` option).
 
 ```bash
 --bwt2_index '[path to bowtie2 index (with basename)]'
@@ -213,8 +295,10 @@ The bowtie2 indexes are required to run the Hi-C pipeline. If the `--bwt2_index`
 
 ### `--chromosome_size`
 
-The Hi-C pipeline will also requires a two-columns text file with the chromosome name and its size (tab separated).
-If not specified, this file will be automatically created by the pipeline. In the latter case, the `--fasta` reference genome has to be specified.
+The Hi-C pipeline also requires a two-column text file with the
+chromosome name and its size (tab separated).
+If not specified, this file will be automatically created by the pipeline.
+In that case, the `--fasta` reference genome has to be specified.
 
 ```bash
    chr1    249250621
@@ -236,7 +320,8 @@ If not specified, this file will be automatically created by the pipeline. In th
 
 ### `--restriction_fragments`
 
-Finally, Hi-C experiments based on restriction enzyme digestion requires a BED file with coordinates of restriction fragments.
+Finally, Hi-C experiments based on restriction enzyme digestion require a BED
+file with the coordinates of the restriction fragments.
 
 ```bash
    chr1   0       16007   HIC_chr1_1    0   +
@@ -252,22 +337,30 @@ Finally, Hi-C experiments based on restriction enzyme digestion requires a BED f
    (...)
 ```
 
-If not specified, this file will be automatically created by the pipline. In this case, the `--fasta` reference genome will be used.
+If not specified, this file will be automatically created by the pipeline.
+In this case, the `--fasta` reference genome will be used.
 Note that the `--restriction_site` parameter is mandatory to create this file.
 
 ## Hi-C specific options
 
-The following options are defined in the `hicpro.config` file, and can be updated either using a custom configuration file (see `-c` option) or using command line parameter.
+The following options are defined in the `hicpro.config` file, and can be
+updated either using a custom configuration file (see the `-c` option) or using
+command line parameters.
 
 ### Reads mapping
 
-The reads mapping is currently based on the two-steps strategy implemented in the HiC-pro pipeline. The idea is to first align reads from end-to-end.
-Reads that do not aligned are then trimmed at the ligation site, and their 5' end is re-aligned to the reference genome.
-Note that the default option are quite stringent, and can be updated according to the reads quality or the reference genome.
+Read mapping is currently based on the two-step strategy implemented in
+the HiC-Pro pipeline. The idea is to first align reads end-to-end.
+Reads that do not align are then trimmed at the ligation site, and their 5'
+end is re-aligned to the reference genome.
+Note that the default options are quite stringent, and can be updated according
+to the read quality or the reference genome.
 
 #### `--bwt2_opts_end2end`
 
-Bowtie2 alignment option for end-to-end mapping. Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+Bowtie2 alignment option for end-to-end mapping.
+Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
 
 ```bash
 --bwt2_opts_end2end '[Options for bowtie2 step1 mapping on full reads]'
@@ -275,7 +368,9 @@ Bowtie2 alignment option for end-to-end mapping. Default: '--very-sensitive -L 3
 
 #### `--bwt2_opts_trimmed`
 
-Bowtie2 alignment option for trimmed reads mapping (step 2). Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+Bowtie2 alignment option for trimmed reads mapping (step 2).
+Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
 
 ```bash
 --bwt2_opts_trimmed '[Options for bowtie2 step2 mapping on trimmed reads]'
@@ -293,15 +388,20 @@ Minimum mapping quality. Reads with lower quality are discarded. Default: 10
 
 #### `--restriction_site`
 
-Restriction motif(s) for Hi-C digestion protocol. The restriction motif(s) is(are) used to generate the list of restriction fragments.
-The precise cutting site of the restriction enzyme has to be specified using the '^' character. Default: 'A^AGCTT'
+Restriction motif(s) for Hi-C digestion protocol. The restriction motif(s)
+is(are) used to generate the list of restriction fragments.
+The precise cutting site of the restriction enzyme has to be specified using
+the '^' character. Default: 'A^AGCTT'
 Here are a few examples:
-* MboI: '^GATC'
-* DpnII: '^GATC'
-* BglII: 'A^GATCT'
-* HindIII: 'A^AGCTT'
 
-Note that multiples restriction motifs can be provided (comma-separated).
+* MboI: ^GATC
+* DpnII: ^GATC
+* BglII: A^GATCT
+* HindIII: A^AGCTT
+* ARIMA kit: ^GATC,G^ANTC
+
+Note that multiple restriction motifs can be provided (comma separated) and
+that the 'N' base is supported.
 
 ```bash
 --restriction_site '[Cutting motif]'
@@ -309,16 +409,22 @@ Note that multiples restriction motifs can be provided (comma-separated).
 
 #### `--ligation_site`
 
-Ligation motif after reads ligation. This motif is used for reads trimming and depends on the fill in strategy.
-Note that multiple ligation sites can be specified. Default: 'AAGCTAGCTT'
+Ligation motif after read ligation. This motif is used for read trimming and
+depends on the fill-in strategy.
+Note that multiple ligation sites can be specified (comma separated) and that
+the 'N' base is interpreted and replaced by 'A', 'C', 'G' or 'T'.
+Default: 'AAGCTAGCTT'
 
 ```bash
 --ligation_site '[Ligation motif]'
 ```
 
+Example for the ARIMA kit: GATCGATC,GANTGATC,GANTANTC,GATCANTC
+
 #### `--min_restriction_fragment_size`
 
-Minimum size of restriction fragments to consider for the Hi-C processing. Default: ''
+Minimum size of restriction fragments to consider for the Hi-C processing.
+Default: ''
 
 ```bash
 --min_restriction_fragment_size '[numeric]'
@@ -326,7 +432,8 @@ Minimum size of restriction fragments to consider for the Hi-C processing. Defau
 
 #### `--max_restriction_fragment_size`
 
-Maximum size of restriction fragments to consider for the Hi-C processing. Default: ''
+Maximum size of restriction fragments to consider for the Hi-C processing.
+Default: ''
 
 ```bash
 --max_restriction_fragment_size '[numeric]'
@@ -334,7 +441,8 @@ Maximum size of restriction fragments to consider for the Hi-C processing. Defau
 
 #### `--min_insert_size`
 
-Minimum reads insert size. Shorter 3C products are discarded. Default: ''
+Minimum reads insert size. Shorter 3C products are discarded.
+Default: ''
 
 ```bash
 --min_insert_size '[numeric]'
@@ -342,7 +450,8 @@ Minimum reads insert size. Shorter 3C products are discarded. Default: ''
 
 #### `--max_insert_size`
 
-Maximum reads insert size. Longer 3C products are discarded. Default: ''
+Maximum reads insert size. Longer 3C products are discarded.
+Default: ''
 
 ```bash
 --max_insert_size '[numeric]'
@@ -352,8 +461,10 @@ Maximum reads insert size. Longer 3C products are discarded. Default: ''
 
 #### `--dnase`
 
-In DNAse Hi-C mode, all options related to digestion Hi-C (see previous section) are ignored.
-In this case, it is highly recommanded to use the `--min_cis_dist` parameter to remove spurious ligation products.
+In DNase Hi-C mode, all options related to digestion Hi-C
+(see previous section) are ignored.
+In this case, it is highly recommended to use the `--min_cis_dist` parameter
+to remove spurious ligation products.
 
 ```bash
 --dnase
@@ -363,7 +474,8 @@ In this case, it is highly recommanded to use the `--min_cis_dist` parameter to
 
 #### `--min_cis_dist`
 
-Filter short range contact below the specified distance. Mainly useful for DNase Hi-C. Default: ''
+Filter short-range contacts below the specified distance.
+Mainly useful for DNase Hi-C. Default: ''
 
 ```bash
 --min_cis_dist '[numeric]'
@@ -387,7 +499,9 @@ If specified, duplicates reads are discarded before building contact maps.
 
 #### `--rm_multi`
 
-If specified, reads that aligned multiple times on the genome are discarded. Note the default mapping options are based on random hit assignment, meaning that only one position is kept per read.
+If specified, reads that align multiple times on the genome are discarded.
+Note that the default mapping options are based on random hit assignment,
+meaning that only one position is kept per read.
 
 ```bash
 --rm_multi
@@ -395,41 +509,46 @@ If specified, reads that aligned multiple times on the genome are discarded. Not
 
 ## Genome-wide contact maps
 
-#### `--bin_size`
+### `--bin_size`
 
-Resolution of contact maps to generate (space separated). Default:'1000000,500000'
+Resolution of contact maps to generate (comma separated).
+Default: '1000000,500000'
 
 ```bash
 --bin_size '[numeric]'
 ```
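+
+For example, using the default resolutions shown above:
+
+```bash
+--bin_size '1000000,500000'
+```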
 
-#### `--ice_max_iter`
+### `--ice_max_iter`
 
-Maximum number of iteration for ICE normalization. Default: 100
+Maximum number of iterations for ICE normalization.
+Default: 100
 
 ```bash
 --ice_max_iter '[numeric]'
 ```
 
-#### `--ice_filer_low_count_perc`
+### `--ice_filter_low_count_perc`
 
-Define which pourcentage of bins with low counts should be force to zero. Default: 0.02
+Define the percentage of bins with low counts that should be forced to zero.
+Default: 0.02
 
 ```bash
 --ice_filter_low_count_perc '[numeric]'
 ```
 
-#### `--ice_filer_high_count_perc`
+### `--ice_filter_high_count_perc`
 
-Define which pourcentage of bins with low counts should be discarded before normalization. Default: 0
+Define the percentage of bins with high counts that should be discarded before
+normalization. Default: 0
 
 ```bash
 --ice_filter_high_count_perc '[numeric]'
 ```
 
-#### `--ice_eps`
+### `--ice_eps`
 
-The relative increment in the results before declaring convergence for ICE normalization. Default: 0.1
+The relative increment in the results before declaring convergence for ICE
+normalization. Default: 0.1
 
 ```bash
 --ice_eps '[numeric]'
@@ -437,108 +556,169 @@ The relative increment in the results before declaring convergence for ICE norma
 
 ## Inputs/Outputs
 
-#### `--splitFastq`
+### `--splitFastq`
 
-By default, the nf-core Hi-C pipeline expects one read pairs per sample. However, for large Hi-C data processing single fastq files can be very time consuming.
-The `--splitFastq` option allows to automatically split input read pairs into chunks of reads. In this case, all chunks will be processed in parallel and merged before generating the contact maps, thus leading to a significant increase of processing performance.
+By default, the nf-core Hi-C pipeline expects one pair of fastq files per
+sample. However, for large Hi-C datasets, processing a single pair of fastq
+files can be very time consuming.
+The `--splitFastq` option automatically splits input read pairs
+into chunks of reads. In this case, all chunks are processed in parallel
+and merged before generating the contact maps, leading to a significant
+increase in processing performance.
 
 ```bash
 --splitFastq '[Number of reads per chunk]'
 ```
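+
+For example, to process the reads in chunks of 10 million read pairs (the
+chunk size here is purely illustrative):
+
+```bash
+--splitFastq 10000000
+```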
 
-#### `--saveReference`
+### `--saveReference`
 
-If specified, annotation files automatically generated from the `--fasta` file are exported in the results folder. Default: false
+If specified, annotation files automatically generated from the `--fasta` file
+are exported in the results folder. Default: false
 
 ```bash
 --saveReference
 ```
 
-#### `--saveAlignedIntermediates`
+### `--saveAlignedIntermediates`
 
-If specified, all intermediate mapping files are saved and exported in the results folder. Default: false
+If specified, all intermediate mapping files are saved and exported in the
+results folder. Default: false
 
 ```bash
 --saveReference
 ```
 
+### `--saveInteractionBAM`
+
+If specified, write a BAM file with all classified reads (valid pairs,
+dangling ends, self-circles, etc.) and their tags.
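+
+Like the other output options, it is enabled as a simple command line flag:
+
+```bash
+--saveInteractionBAM
+```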
+
 ## Skip options
 
-#### `--skip_maps`
+### `--skipMaps`
 
-If defined, the workflow stops with the list of valid interactions, and the genome-wide maps are not built. Usefult for capture-C analysis. Default: false
+If defined, the workflow stops with the list of valid interactions, and the
+genome-wide maps are not built. Useful for capture-C analysis. Default: false
 
 ```bash
---skip_maps
+--skipMaps
 ```
 
-#### `--skip_ice`
+### `--skipIce`
 
-If defined, the ICE normalization is not run on the raw contact maps. Default: false
+If defined, the ICE normalization is not run on the raw contact maps.
+Default: false
 
 ```bash
---skip_ice
+--skipIce
 ```
 
-#### `--skip_cool`
+### `--skipCool`
 
 If defined, cooler files are not generated. Default: false
 
 ```bash
---skip_cool
+--skipCool
 ```
 
-#### `--skip_multiqc`
+### `--skipMultiQC`
 
 If defined, the MultiQC report is not generated. Default: false
 
 ```bash
---skip_multiqc
+--skipMultiQC
 ```
 
 ## Job resources
+
 ### Automatic resubmission
-Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped.
+
+Each step in the pipeline has a default set of requirements for number of CPUs,
+memory and time. For most of the steps in the pipeline, if the job exits with
+an error code of `143` (exceeded requested resources) it will automatically
+resubmit with higher requests (2 x original, then 3 x original). If it still
+fails after three times then the pipeline is stopped.
 
 ### Custom resource requests
-Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files hosted at [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples.
 
-If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition below). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
+Wherever process-specific requirements are set in the pipeline, the default value
+can be changed by creating a custom config file. See the files hosted
+at [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples.
 
-If you have any questions or issues please send us a message on [`Slack`](https://nf-core-invite.herokuapp.com/).
+If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack).
 
 ## AWS Batch specific parameters
-Running the pipeline on AWS Batch requires a couple of specific parameters to be set according to your AWS Batch configuration. Please use the `-awsbatch` profile and then specify all of the following parameters.
+
+Running the pipeline on AWS Batch requires a couple of specific parameters to be
+set according to your AWS Batch configuration. Please use
+[`-profile awsbatch`](https://github.com/nf-core/configs/blob/master/conf/awsbatch.config)
+and then specify all of the following parameters.
+
 ### `--awsqueue`
+
 The JobQueue that you intend to use on AWS Batch.
+
 ### `--awsregion`
-The AWS region to run your job in. Default is set to `eu-west-1` but can be adjusted to your needs.
 
-Please make sure to also set the `-w/--work-dir` and `--outdir` parameters to a S3 storage bucket of your choice - you'll get an error message notifying you if you didn't.
+The AWS region in which to run your job. Default is set to `eu-west-1` but can be adjusted to your needs.
+
+### `--awscli`
+
+The [AWS CLI](https://www.nextflow.io/docs/latest/awscloud.html#aws-cli-installation)
+path in your custom AMI. Default: `/home/ec2-user/miniconda/bin/aws`.
+
+Please make sure to also set the `-w/--work-dir` and `--outdir` parameters to
+an S3 storage bucket of your choice - you'll get an error message notifying you
+if you didn't.
 
 ## Other command line parameters
 
 ### `--outdir`
+
 The output directory where the results will be saved.
 
 ### `--email`
-Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to speicfy this on the command line for every run.
+
+Set this parameter to your e-mail address to get a summary e-mail with details
+of the run sent to you when the workflow exits. If set in your user config file
+(`~/.nextflow/config`) then you don't need to specify this on the command line for every run.
+
+### `--email_on_fail`
+
+This works exactly as with `--email`, except emails are only sent if the workflow is not successful.
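+
+For example (the address below is a placeholder):
+
+```bash
+--email_on_fail 'you@example.com'
+```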
+
+### `--max_multiqc_email_size`
+
+Threshold size for the MultiQC report to be attached in the notification email. If the file generated by the pipeline exceeds the threshold, it will not be attached (Default: 25MB).
 
 ### `-name`
+
 Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic.
 
-This is used in the MultiQC report (if not default) and in the summary HTML / e-mail (always).
+This is used in the MultiQC report (if not default) and in the summary HTML /
+e-mail (always).
 
 **NB:** Single hyphen (core Nextflow option)
 
 ### `-resume`
-Specify this when restarting a pipeline. Nextflow will used cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously.
 
-You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names.
+Specify this when restarting a pipeline. Nextflow will use cached results from
+any pipeline steps where the inputs are the same, continuing from where it got
+to previously.
+
+You can also supply a run name to resume a specific run: `-resume [run-name]`.
+Use the `nextflow log` command to show previous run names.
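+
+For example, to resume the most recent run, or a specific named run (the run
+name below is a placeholder taken from `nextflow log`):
+
+```bash
+nextflow run nf-core/hic -resume
+nextflow run nf-core/hic -resume gloomy_pasteur
+```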
 
 **NB:** Single hyphen (core Nextflow option)
 
 ### `-c`
+
 Specify the path to a specific config file (this is a core Nextflow command).
 
 **NB:** Single hyphen (core Nextflow option)
@@ -546,27 +726,59 @@ Specify the path to a specific config file (this is a core NextFlow command).
 Note - you can use this to override pipeline defaults.
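+
+For example, a minimal sketch (the config path is a placeholder):
+
+```bash
+nextflow run nf-core/hic -c /path/to/custom.config --reads '*_R{1,2}.fastq.gz' --genome GRCh37
+```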
 
 ### `--custom_config_version`
-Provide git commit id for custom Institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default is set to `master`.
+
+Provide git commit id for custom Institutional configs hosted at `nf-core/configs`.
+This was implemented for reproducibility purposes. Default: `master`.
 
 ```bash
 ## Download and use config file with the following git commit id
 --custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96
 ```
 
+### `--custom_config_base`
+
+If you're running offline, Nextflow will not be able to fetch the institutional config files
+from the internet. If you don't need them, then this is not a problem. If you do need them,
+you should download the files from the repo and tell Nextflow where to find them with the
+`--custom_config_base` option. For example:
+
+```bash
+## Download and unzip the config files
+cd /path/to/my/configs
+wget https://github.com/nf-core/configs/archive/master.zip
+unzip master.zip
+
+## Run the pipeline
+cd /path/to/my/data
+nextflow run /path/to/pipeline/ --custom_config_base /path/to/my/configs/configs-master/
+```
+
+> Note that the nf-core/tools helper package has a `download` command to download all required pipeline
+> files + singularity containers + institutional configs in one go for you, to make this process easier.
+
 ### `--max_memory`
+
 Use to set a top-limit for the default memory requirement for each process.
 Should be a string in the format integer-unit. eg. `--max_memory '8.GB'`
 
 ### `--max_time`
+
 Use to set a top-limit for the default time requirement for each process.
 Should be a string in the format integer-unit. eg. `--max_time '2.h'`
 
 ### `--max_cpus`
+
 Use to set a top-limit for the default CPU requirement for each process.
 Should be a string in the format integer-unit. eg. `--max_cpus 1`
 
 ### `--plaintext_email`
+
 Set to receive plain-text e-mails instead of HTML formatted.
 
+### `--monochrome_logs`
+
+Set to disable colourful command line output and live life in monochrome.
+
 ### `--multiqc_config`
+
 Specify a path to a custom MultiQC configuration file.
diff --git a/environment.yml b/environment.yml
index 34958b7d3505d0ad73b33fb325ab1f87a0d6f8a3..b9e6c0218828ade3813a07574bec46380ac7d3f9 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,21 +1,30 @@
 # You can use this file to create a conda environment for this pipeline:
 #   conda env create -f environment.yml
-name: nf-core-hic-1.0.0
+name: nf-core-hic-1.2.0
 channels:
   - conda-forge
   - bioconda
   - defaults
 dependencies:
-  - python=2.7.15
-  - pip=19.1
-  - scipy=1.2.1
-  - numpy=1.16.3
-  - r-markdown=0.9
-  - bx-python=0.8.2     
-  - pysam=0.15.2
-  - cooler=0.8.5
-  - bowtie2=2.3.5
-  - samtools=1.9
-  - multiqc=1.7
+  - conda-forge::python=3.7.6
+  - pip=20.0.1
+  - conda-forge::scipy=1.4.1
+  - conda-forge::numpy=1.18.1
+  - bioconda::iced=0.5.6
+  - bioconda::bx-python=0.8.8
+  - bioconda::pysam=0.15.4
+  - conda-forge::pymdown-extensions=7.1
+  - bioconda::cooler=0.8.6
+  - bioconda::bowtie2=2.3.5
+  - bioconda::samtools=1.9
+  - bioconda::multiqc=1.8
+
+## Dev tools
+  - bioconda::hicexplorer=3.4.3
+  - bioconda::bioconductor-hitc=1.32.0
+  - conda-forge::r-optparse=1.6.6
+  - bioconda::ucsc-bedgraphtobigwig=357
+  - conda-forge::cython=0.29.19
   - pip:
-    - iced==0.5.1
+    - cooltools==0.3.2
+    - fanc==0.8.30
\ No newline at end of file
diff --git a/main.nf b/main.nf
index ce29fd5dfa679a2b2c404d553108150ae29cf233..7eb43154af1206fab562a759ac27a87faacb86ed 100644
--- a/main.nf
+++ b/main.nf
@@ -9,7 +9,6 @@
 ----------------------------------------------------------------------------------------
 */
 
-
 def helpMessage() {
     // Add to this help message with new command line parameters
     log.info nfcoreHeader()
@@ -22,57 +21,64 @@ def helpMessage() {
     nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' -profile conda
 
     Mandatory arguments:
-      --reads				    Path to input data (must be surrounded with quotes)
-      -profile                      	    Configuration profile to use. Can use multiple (comma separated)
-                                    	    Available: conda, docker, singularity, awsbatch, test and more.
-
-    References:                      	    If not specified in the configuration file or you wish to overwrite any of the references.
-      --genome                              Name of iGenomes reference
-      --bwt2_index                     	    Path to Bowtie2 index
-      --fasta                       	    Path to Fasta reference
-      --chromosome_size             	    Path to chromosome size file
-      --restriction_fragments    	    Path to restriction fragment file (bed)
-
-    Options:
-      --bwt2_opts_end2end		    Options for bowtie2 end-to-end mappinf (first mapping step)
-      --bwt2_opts_trimmed	    	    Options for bowtie2 mapping after ligation site trimming
-      --min_mapq		    	    Minimum mapping quality values to consider
-
-      --restriction_site	    	    Cutting motif(s) of restriction enzyme(s) (comma separated)
-      --ligation_site		    	    Ligation motifs to trim (comma separated)
-      --min_restriction_fragment_size	    Minimum size of restriction fragments to consider
-      --max_restriction_framgnet_size	    Maximum size of restriction fragmants to consider
-      --min_insert_size			    Minimum insert size of mapped reads to consider
-      --max_insert_size			    Maximum insert size of mapped reads to consider
-
-      --dnase				    Run DNase Hi-C mode. All options related to restriction fragments are not considered
-
-      --min_cis_dist			    Minimum intra-chromosomal distance to consider
-      --rm_singleton			    Remove singleton reads
-      --rm_multi			    Remove multi-mapped reads
-      --rm_dup				    Remove duplicates
-
-      --bin_size			    Bin size for contact maps (comma separated)
-      --ice_max_iter			    Maximum number of iteration for ICE normalization
-      --ice_filter_low_count_perc	    Percentage of low counts columns/rows to filter before ICE normalization
-      --ice_filter_high_count_perc	    Percentage of high counts columns/rows to filter before ICE normalization
-      --ice_eps				    Convergence criteria for ICE normalization
-
-    Other options:
-      --splitFastq			    Size of read chuncks to use to speed up the workflow
-      --outdir				    The output directory where the results will be saved
-      --email                       	    Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits
-      -name                         	    Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic.
-
-    Step options:
-      --skip_maps                           Skip generation of contact maps. Useful for capture-C
-      --skip_ice			    Skip ICE normalization
-      --skip_cool			    Skip generation of cooler files
-      --skip_multiQC			    Skip MultiQC
-
-    AWSBatch options:
-      --awsqueue			    The AWSBatch JobQueue that needs to be set when running on AWSBatch
-      --awsregion                   	    The AWS Region for your AWS Batch job to run on
+      --reads [file]                            Path to input data (must be surrounded with quotes)
+      -profile [str]                            Configuration profile to use. Can use multiple (comma separated)
+                                                Available: conda, docker, singularity, awsbatch, test and more.
+
+    References                                  If not specified in the configuration file, or if you wish to overwrite any of the references.
+      --genome [str]                            Name of iGenomes reference
+      --bwt2_index [file]                       Path to Bowtie2 index
+      --fasta [file]                            Path to Fasta reference
+      --chromosome_size [file]                  Path to chromosome size file
+      --restriction_fragments [file]            Path to restriction fragment file (bed)
+      --save_reference [bool]                   Save reference genome to output folder. Default: False
+      --save_aligned_intermediates [bool]       Save intermediate alignment files. Default: False
+
+    Alignments
+      --bwt2_opts_end2end [str]                 Options for bowtie2 end-to-end mapping (first mapping step). See hic.config for default.
+      --bwt2_opts_trimmed [str]                 Options for bowtie2 mapping after ligation site trimming. See hic.config for default.
+      --min_mapq [int]                          Minimum mapping quality values to consider. Default: 10
+      --restriction_site [str]                  Cutting motif(s) of restriction enzyme(s) (comma separated). Default: 'A^AGCTT'
+      --ligation_site [str]                     Ligation motifs to trim (comma separated). Default: 'AAGCTAGCTT'
+      --rm_singleton [bool]                     Remove singleton reads. Default: true
+      --rm_multi [bool]                         Remove multi-mapped reads. Default: true
+      --rm_dup [bool]                           Remove duplicates. Default: true
+
+    Contacts calling
+      --min_restriction_fragment_size [int]     Minimum size of restriction fragments to consider. Default: None
+      --max_restriction_fragment_size [int]     Maximum size of restriction fragments to consider. Default: None
+      --min_insert_size [int]                   Minimum insert size of mapped reads to consider. Default: None
+      --max_insert_size [int]                   Maximum insert size of mapped reads to consider. Default: None
+      --save_interaction_bam [bool]             Save BAM file with interaction tags (dangling-end, self-circle, etc.). Default: False
+
+      --dnase [bool]                            Run DNase Hi-C mode. All options related to restriction fragments are ignored. Default: False
+      --min_cis_dist [int]                      Minimum intra-chromosomal distance to consider. Default: None
+
+    Contact maps
+      --bin_size [int]                          Bin size for contact maps (comma separated). Default: '1000000,500000'
+      --ice_max_iter [int]                      Maximum number of iterations for ICE normalization. Default: 100
+      --ice_filter_low_count_perc [float]       Percentage of low counts columns/rows to filter before ICE normalization. Default: 0.02
+      --ice_filter_high_count_perc [float]      Percentage of high counts columns/rows to filter before ICE normalization. Default: 0
+      --ice_eps [float]                         Convergence criteria for ICE normalization. Default: 0.1
+
+
+    Workflow
+      --skip_maps [bool]                        Skip generation of contact maps. Useful for capture-C. Default: False
+      --skip_ice [bool]                         Skip ICE normalization. Default: False
+      --skip_cool [bool]                        Skip generation of cool files. Default: False
+      --skip_multiqc [bool]                     Skip MultiQC. Default: False
+
+    Other
+      --split_fastq [int]                       Size of read chunks to use to speed up the workflow. Default: None
+      --outdir [file]                           The output directory where the results will be saved. Default: './results'
+      --email [email]                           Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. Default: None
+      --email_on_fail [email]                   Same as --email, except only send mail if the workflow is not successful
+      --max_multiqc_email_size [str]            Threshold size for MultiQC report to be attached in notification email. If the file generated by the pipeline exceeds the threshold, it will not be attached (Default: 25MB)
+      -name [str]                               Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. Default: None
+
+    AWSBatch
+      --awsqueue [str]                          The AWSBatch JobQueue that needs to be set when running on AWSBatch
+      --awsregion [str]                         The AWS Region for your AWS Batch job to run on
     """.stripIndent()
 }
 
@@ -80,7 +86,7 @@ def helpMessage() {
  * SET UP CONFIGURATION VARIABLES
  */
 
-// Show help emssage
+// Show help message
 if (params.help){
     helpMessage()
     exit 0
@@ -100,28 +106,27 @@ if (!params.dnase && !params.ligation_site) {
 params.bwt2_index = params.genome ? params.genomes[ params.genome ].bowtie2 ?: false : false
 params.fasta = params.genome ? params.genomes[ params.genome ].fasta ?: false : false
 
-
 // Has the run name been specified by the user?
 //  this has the bonus effect of catching both -name and --name
 custom_runName = params.name
-if( !(workflow.runName ==~ /[a-z]+_[a-z]+/) ){
-  custom_runName = workflow.runName
+if (!(workflow.runName ==~ /[a-z]+_[a-z]+/)) {
+    custom_runName = workflow.runName
 }
 
-
-if( workflow.profile == 'awsbatch') {
-  // AWSBatch sanity checking
-  if (!params.awsqueue || !params.awsregion) exit 1, "Specify correct --awsqueue and --awsregion parameters on AWSBatch!"
-  // Check outdir paths to be S3 buckets if running on AWSBatch
-  // related: https://github.com/nextflow-io/nextflow/issues/813
-  if (!params.outdir.startsWith('s3:')) exit 1, "Outdir not on S3 - specify S3 Bucket to run on AWSBatch!"
-  // Prevent trace files to be stored on S3 since S3 does not support rolling files.
-  if (workflow.tracedir.startsWith('s3:')) exit 1, "Specify a local tracedir or run without trace! S3 cannot be used for tracefiles."
+if (workflow.profile.contains('awsbatch')) {
+    // AWSBatch sanity checking
+    if (!params.awsqueue || !params.awsregion) exit 1, "Specify correct --awsqueue and --awsregion parameters on AWSBatch!"
+    // Check outdir paths to be S3 buckets if running on AWSBatch
+    // related: https://github.com/nextflow-io/nextflow/issues/813
+    if (!params.outdir.startsWith('s3:')) exit 1, "Outdir not on S3 - specify S3 Bucket to run on AWSBatch!"
+    // Prevent trace files to be stored on S3 since S3 does not support rolling files.
+    if (params.tracedir.startsWith('s3:')) exit 1, "Specify a local tracedir or run without trace! S3 cannot be used for tracefiles."
 }
 
 // Stage config files
-ch_multiqc_config = Channel.fromPath(params.multiqc_config)
-ch_output_docs = Channel.fromPath("$baseDir/docs/output.md")
+ch_multiqc_config = file("$baseDir/assets/multiqc_config.yaml", checkIfExists: true)
+ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config, checkIfExists: true) : Channel.empty()
+ch_output_docs = file("$baseDir/docs/output.md", checkIfExists: true)
 
 /**********************************************************
  * SET UP CHANNELS
@@ -130,7 +135,6 @@ ch_output_docs = Channel.fromPath("$baseDir/docs/output.md")
 /*
  * input read files
  */
-
 if (params.readPaths){
 
    raw_reads = Channel.create()
@@ -150,11 +154,11 @@ if (params.readPaths){
       .separate( raw_reads, raw_reads_2 ) { a -> [tuple(a[0], a[1][0]), tuple(a[0], a[1][1])] }
 }
 
-if ( params.splitFastq ){
+if ( params.split_fastq ){
    raw_reads_full = raw_reads.concat( raw_reads_2 )
    raw_reads = raw_reads_full.splitFastq( by: params.splitFastq , file: true)
  }else{
-   raw_reads = raw_reads.concat( raw_reads_2 )
+   raw_reads = raw_reads.concat( raw_reads_2 ).dump(tag: "data")
 }
 
 
@@ -189,7 +193,6 @@ else {
 }
 
 // Chromosome size
-
 if ( params.chromosome_size ){
    Channel.fromPath( params.chromosome_size , checkIfExists: true)
          .into {chromosome_size; chromosome_size_cool}
@@ -220,10 +223,6 @@ else {
 // Resolutions for contact maps
 map_res = Channel.from( params.bin_size.tokenize(',') )
 
-// Stage config files
-ch_multiqc_config = Channel.fromPath(params.multiqc_config)
-ch_output_docs = Channel.fromPath("$baseDir/docs/output.md")
-
 /**********************************************************
  * SET UP LOGS
  */
@@ -234,14 +233,19 @@ def summary = [:]
 if(workflow.revision) summary['Pipeline Release'] = workflow.revision
 summary['Run Name']         = custom_runName ?: workflow.runName
 summary['Reads']            = params.reads
-summary['splitFastq']       = params.splitFastq
+summary['splitFastq']       = params.split_fastq
 summary['Fasta Ref']        = params.fasta
 summary['Restriction Motif']= params.restriction_site
 summary['Ligation Motif']   = params.ligation_site
 summary['DNase Mode']       = params.dnase
 summary['Remove Dup']       = params.rm_dup
+summary['Min MAPQ']         = params.min_mapq
+summary['Min Fragment Size']= params.min_restriction_fragment_size
+summary['Max Fragment Size']= params.max_restriction_fragment_size
+summary['Min Insert Size']  = params.min_insert_size
+summary['Max Insert Size']  = params.max_insert_size
+summary['Min CIS dist']     = params.min_cis_dist
 summary['Maps resolution']  = params.bin_size
-
 summary['Max Memory']       = params.max_memory
 summary['Max CPUs']         = params.max_cpus
 summary['Max Time']         = params.max_time
@@ -270,14 +274,15 @@ if(params.email) {
   summary['MultiQC maxsize'] = params.maxMultiqcEmailFileSize
 }
 log.info summary.collect { k,v -> "${k.padRight(18)}: $v" }.join("\n")
-log.info "\033[2m----------------------------------------------------\033[0m"
+log.info "-\033[2m--------------------------------------------------\033[0m-"
 
 // Check the hostnames against configured profiles
 checkHostname()
 
-def create_workflow_summary(summary) {
-    def yaml_file = workDir.resolve('workflow_summary_mqc.yaml')
-    yaml_file.text  = """
+Channel.from(summary.collect{ [it.key, it.value] })
+    .map { k,v -> "<dt>$k</dt><dd><samp>${v ?: '<span style=\"color:#999999;\">N/A</a>'}</samp></dd>" }
+    .reduce { a, b -> return [a, b].join("\n            ") }
+    .map { x -> """
     id: 'nf-core-hic-summary'
     description: " - this information is collected when the pipeline is started."
     section_name: 'nf-core/hic Workflow Summary'
@@ -285,17 +290,15 @@ def create_workflow_summary(summary) {
     plot_type: 'html'
     data: |
         <dl class=\"dl-horizontal\">
-${summary.collect { k,v -> "            <dt>$k</dt><dd><samp>${v ?: '<span style=\"color:#999999;\">N/A</a>'}</samp></dd>" }.join("\n")}
+            $x
         </dl>
-    """.stripIndent()
-
-   return yaml_file
-}
-
+    """.stripIndent() }
+    .set { ch_workflow_summary }
 
 /*
  * Parse software version numbers
  */
+
 process get_software_versions {
    publishDir "${params.outdir}/pipeline_info", mode: 'copy',
    saveAs: {filename ->
@@ -319,6 +322,25 @@ process get_software_versions {
    """
 }
 
+def create_workflow_summary(summary) {
+
+    def yaml_file = workDir.resolve('workflow_summary_mqc.yaml')
+    yaml_file.text  = """
+    id: 'nf-core-hic-summary'
+    description: " - this information is collected when the pipeline is started."
+    section_name: 'nf-core/hic Workflow Summary'
+    section_href: 'https://github.com/nf-core/hic'
+    plot_type: 'html'
+    data: |
+        <dl class=\"dl-horizontal\">
+${summary.collect { k,v -> "            <dt>$k</dt><dd><samp>${v ?: '<span style=\"color:#999999;\">N/A</a>'}</samp></dd>" }.join("\n")}
+        </dl>
+    """.stripIndent()
+
+   return yaml_file
+}
+
+
 
 /****************************************************
  * PRE-PROCESSING
@@ -327,8 +349,9 @@ process get_software_versions {
 if(!params.bwt2_index && params.fasta){
     process makeBowtie2Index {
         tag "$bwt2_base"
-        publishDir path: { params.saveReference ? "${params.outdir}/reference_genome" : params.outdir },
-                   saveAs: { params.saveReference ? it : null }, mode: 'copy'
+        label 'process_highmem'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: 'copy'
 
         input:
         file fasta from fasta_for_index
@@ -350,8 +373,9 @@ if(!params.bwt2_index && params.fasta){
 if(!params.chromosome_size && params.fasta){
     process makeChromSize {
         tag "$fasta"
-        publishDir path: { params.saveReference ? "${params.outdir}/reference_genome" : params.outdir },
-                   saveAs: { params.saveReference ? it : null }, mode: 'copy'
+        label 'process_low'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: 'copy'
 
         input:
         file fasta from fasta_for_chromsize
@@ -369,9 +393,10 @@ if(!params.chromosome_size && params.fasta){
 
 if(!params.restriction_fragments && params.fasta && !params.dnase){
     process getRestrictionFragments {
-        tag "$fasta [${params.restriction_site}]"
-        publishDir path: { params.saveReference ? "${params.outdir}/reference_genome" : params.outdir },
-                   saveAs: { params.saveReference ? it : null }, mode: 'copy'
+        tag "$fasta ${params.restriction_site}"
+        label 'process_low'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: 'copy'
 
         input:
         file fasta from fasta_for_resfrag
@@ -396,188 +421,192 @@ if(!params.restriction_fragments && params.fasta && !params.dnase){
 
 process bowtie2_end_to_end {
    tag "$prefix"
-   publishDir path: { params.saveAlignedIntermediates ? "${params.outdir}/mapping" : params.outdir },
-   	      saveAs: { params.saveAlignedIntermediates ? it : null }, mode: 'copy'
+   label 'process_medium'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: 'copy'
 
    input:
-        set val(sample), file(reads) from raw_reads
-        file index from bwt2_index_end2end.collect()
+   set val(sample), file(reads) from raw_reads
+   file index from bwt2_index_end2end.collect()
 
    output:
-	set val(prefix), file("${prefix}_unmap.fastq") into unmapped_end_to_end
-     	set val(prefix), file("${prefix}.bam") into end_to_end_bam
+   set val(prefix), file("${prefix}_unmap.fastq") into unmapped_end_to_end
+   set val(prefix), file("${prefix}.bam") into end_to_end_bam
 
    script:
-	prefix = reads.toString() - ~/(\.fq)?(\.fastq)?(\.gz)?$/
-        def bwt2_opts = params.bwt2_opts_end2end
-
-	if (!params.dnase){
-	   """
-	   bowtie2 --rg-id BMG --rg SM:${prefix} \\
-		${bwt2_opts} \\
-		-p ${task.cpus} \\
-		-x ${index}/${bwt2_base} \\
-		--un ${prefix}_unmap.fastq \\
-	 	-U ${reads} | samtools view -F 4 -bS - > ${prefix}.bam
-           """
-	}else{
-	   """
-	   bowtie2 --rg-id BMG --rg SM:${prefix} \\
-		${bwt2_opts} \\
-		-p ${task.cpus} \\
-		-x ${index}/${bwt2_base} \\
-		--un ${prefix}_unmap.fastq \\
-	 	-U ${reads} > ${prefix}.bam
-           """
-	}
+   prefix = reads.toString() - ~/(\.fq)?(\.fastq)?(\.gz)?$/
+   def bwt2_opts = params.bwt2_opts_end2end
+
+   if (!params.dnase){
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+	${bwt2_opts} \\
+	-p ${task.cpus} \\
+	-x ${index}/${bwt2_base} \\
+	--un ${prefix}_unmap.fastq \\
+ 	-U ${reads} | samtools view -F 4 -bS - > ${prefix}.bam
+   """
+   }else{
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+	${bwt2_opts} \\
+	-p ${task.cpus} \\
+	-x ${index}/${bwt2_base} \\
+	--un ${prefix}_unmap.fastq \\
+ 	-U ${reads} > ${prefix}.bam
+   """
+   }
 }
 
 process trim_reads {
    tag "$prefix"
-   publishDir path: { params.saveAlignedIntermediates ? "${params.outdir}/mapping" : params.outdir },
-   	      saveAs: { params.saveAlignedIntermediates ? it : null }, mode: 'copy'
+   label 'process_low'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: 'copy'
 
    when:
-      !params.dnase
+   !params.dnase
 
    input:
-      set val(prefix), file(reads) from unmapped_end_to_end
+   set val(prefix), file(reads) from unmapped_end_to_end
 
    output:
-      set val(prefix), file("${prefix}_trimmed.fastq") into trimmed_reads
+   set val(prefix), file("${prefix}_trimmed.fastq") into trimmed_reads
 
    script:
-      """
-      cutsite_trimming --fastq $reads \\
-       		       --cutsite  ${params.ligation_site} \\
-                       --out ${prefix}_trimmed.fastq
-      """
+   """
+   cutsite_trimming --fastq $reads \\
+                    --cutsite  ${params.ligation_site} \\
+                    --out ${prefix}_trimmed.fastq
+   """
 }
 
 process bowtie2_on_trimmed_reads {
    tag "$prefix"
-   publishDir path: { params.saveAlignedIntermediates ? "${params.outdir}/mapping" : params.outdir },
-   	      saveAs: { params.saveAlignedIntermediates ? it : null }, mode: 'copy'
+   label 'process_medium'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: 'copy'
 
    when:
-      !params.dnase
+   !params.dnase
 
    input:
-      set val(prefix), file(reads) from trimmed_reads
-      file index from bwt2_index_trim.collect()
+   set val(prefix), file(reads) from trimmed_reads
+   file index from bwt2_index_trim.collect()
 
    output:
-      set val(prefix), file("${prefix}_trimmed.bam") into trimmed_bam
+   set val(prefix), file("${prefix}_trimmed.bam") into trimmed_bam
 
    script:
-      prefix = reads.toString() - ~/(_trimmed)?(\.fq)?(\.fastq)?(\.gz)?$/
-      """
-      bowtie2 --rg-id BMG --rg SM:${prefix} \\
-      	      ${params.bwt2_opts_trimmed} \\
-              -p ${task.cpus} \\
-	      -x ${index}/${bwt2_base} \\
-	      -U ${reads} | samtools view -bS - > ${prefix}_trimmed.bam
-      """
+   prefix = reads.toString() - ~/(_trimmed)?(\.fq)?(\.fastq)?(\.gz)?$/
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+           ${params.bwt2_opts_trimmed} \\
+           -p ${task.cpus} \\
+           -x ${index}/${bwt2_base} \\
+           -U ${reads} | samtools view -bS - > ${prefix}_trimmed.bam
+   """
 }
 
 if (!params.dnase){
    process merge_mapping_steps{
       tag "$sample = $bam1 + $bam2"
-      publishDir path: { params.saveAlignedIntermediates ? "${params.outdir}/mapping" : params.outdir },
-   	      saveAs: { params.saveAlignedIntermediates ? it : null }, mode: 'copy'
+      label 'process_medium'
+      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: 'copy'
 
       input:
-         set val(prefix), file(bam1), file(bam2) from end_to_end_bam.join( trimmed_bam )
+      set val(prefix), file(bam1), file(bam2) from end_to_end_bam.join( trimmed_bam )
 
       output:
-         set val(sample), file("${prefix}_bwt2merged.bam") into bwt2_merged_bam
-         set val(oname), file("${prefix}.mapstat") into all_mapstat
+      set val(sample), file("${prefix}_bwt2merged.bam") into bwt2_merged_bam
+      set val(oname), file("${prefix}.mapstat") into all_mapstat
 
       script:
-         sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/
-         tag = prefix.toString() =~/_R1|_val_1/ ? "R1" : "R2"
-         oname = prefix.toString() - ~/(\.[0-9]+)$/
-
-         """
-         samtools merge -@ ${task.cpus} \\
-       	             -f ${prefix}_bwt2merged.bam \\
-	             ${bam1} ${bam2}
+      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1$|_2)/
+      tag = prefix.toString() =~/_R1|_val_1|_1/ ? "R1" : "R2"
+      oname = prefix.toString() - ~/(\.[0-9]+)$/
+      """
+      samtools merge -@ ${task.cpus} \\
+    	             -f ${prefix}_bwt2merged.bam \\
+                     ${bam1} ${bam2}
 
-         samtools sort -@ ${task.cpus} -m 800M \\
+      samtools sort -@ ${task.cpus} -m 800M \\
       	            -n -T /tmp/ \\
 	            -o ${prefix}_bwt2merged.sorted.bam \\
 	            ${prefix}_bwt2merged.bam
 
-         mv ${prefix}_bwt2merged.sorted.bam ${prefix}_bwt2merged.bam
-
-         echo "## ${prefix}" > ${prefix}.mapstat
-         echo -n "total_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
-         echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c -F 4 ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
-         echo -n "global_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
-         echo -n "local_${tag}\t"  >> ${prefix}.mapstat
-         samtools view -c -F 4 ${bam2} >> ${prefix}.mapstat
-         """
+      mv ${prefix}_bwt2merged.sorted.bam ${prefix}_bwt2merged.bam
+
+      echo "## ${prefix}" > ${prefix}.mapstat
+      echo -n "total_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+      echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+      echo -n "global_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "local_${tag}\t"  >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam2} >> ${prefix}.mapstat
+      """
    }
 }else{
    process dnase_mapping_stats{
       tag "$sample = $bam1"
-      publishDir path: { params.saveAlignedIntermediates ? "${params.outdir}/mapping" : params.outdir },
-   	      saveAs: { params.saveAlignedIntermediates ? it : null }, mode: 'copy'
+      label 'process_medium'
+      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: 'copy'
 
       input:
-         set val(prefix), file(bam1) from end_to_end_bam
+      set val(prefix), file(bam1) from end_to_end_bam
 
       output:
-         set val(sample), file(bam1) into bwt2_merged_bam
-         set val(oname), file("${prefix}.mapstat") into all_mapstat
+      set val(sample), file(bam1) into bwt2_merged_bam
+      set val(oname), file("${prefix}.mapstat") into all_mapstat
 
       script:
-         sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/
-         tag = prefix.toString() =~/_R1|_val_1/ ? "R1" : "R2"
-         oname = prefix.toString() - ~/(\.[0-9]+)$/
-
-         """
-         echo "## ${prefix}" > ${prefix}.mapstat
-         echo -n "total_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c ${bam1} >> ${prefix}.mapstat
-	 echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
-         echo -n "global_${tag}\t" >> ${prefix}.mapstat
-         samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
-         echo -n "local_${tag}\t0"  >> ${prefix}.mapstat
-         """
+      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
+      tag = prefix.toString() =~/_R1|_val_1|_1/ ? "R1" : "R2"
+      oname = prefix.toString() - ~/(\.[0-9]+)$/
+      """
+      echo "## ${prefix}" > ${prefix}.mapstat
+      echo -n "total_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c ${bam1} >> ${prefix}.mapstat
+      echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "global_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "local_${tag}\t0"  >> ${prefix}.mapstat
+      """
    }
 }
 
 process combine_mapped_files{
    tag "$sample = $r1_prefix + $r2_prefix"
+   label 'process_low'
    publishDir "${params.outdir}/mapping", mode: 'copy',
    	      saveAs: {filename -> filename.indexOf(".pairstat") > 0 ? "stats/$filename" : "$filename"}
 
    input:
-      set val(sample), file(aligned_bam) from bwt2_merged_bam.groupTuple()
+   set val(sample), file(aligned_bam) from bwt2_merged_bam.groupTuple()
 
    output:
-      set val(sample), file("${sample}_bwt2pairs.bam") into paired_bam
-      set val(oname), file("*.pairstat") into all_pairstat
+   set val(sample), file("${sample}_bwt2pairs.bam") into paired_bam
+   set val(oname), file("*.pairstat") into all_pairstat
 
    script:
-      r1_bam = aligned_bam[0]
-      r1_prefix = r1_bam.toString() - ~/_bwt2merged.bam$/
-      r2_bam = aligned_bam[1]
-      r2_prefix = r2_bam.toString() - ~/_bwt2merged.bam$/
-      oname = sample.toString() - ~/(\.[0-9]+)$/
-
-      def opts = "-t"
-      opts = params.rm_singleton ? "${opts}" : "--single ${opts}"
-      opts = params.rm_multi ? "${opts}" : "--multi ${opts}"
-      if ("$params.min_mapq".isInteger()) opts="${opts} -q ${params.min_mapq}"
-      """
-      mergeSAM.py -f ${r1_bam} -r ${r2_bam} -o ${sample}_bwt2pairs.bam ${opts}
-      """
+   r1_bam = aligned_bam[0]
+   r1_prefix = r1_bam.toString() - ~/_bwt2merged.bam$/
+   r2_bam = aligned_bam[1]
+   r2_prefix = r2_bam.toString() - ~/_bwt2merged.bam$/
+   oname = sample.toString() - ~/(\.[0-9]+)$/
+
+   def opts = "-t"
+   opts = params.rm_singleton ? "${opts}" : "--single ${opts}"
+   opts = params.rm_multi ? "${opts}" : "--multi ${opts}"
+   if ("$params.min_mapq".isInteger()) opts="${opts} -q ${params.min_mapq}"
+   """
+   mergeSAM.py -f ${r1_bam} -r ${r2_bam} -o ${sample}_bwt2pairs.bam ${opts}
+   """
 }
 
 
@@ -588,59 +617,69 @@ process combine_mapped_files{
 if (!params.dnase){
    process get_valid_interaction{
       tag "$sample"
+      label 'process_low'
       publishDir "${params.outdir}/hic_results/data", mode: 'copy',
    	      saveAs: {filename -> filename.indexOf("*stat") > 0 ? "stats/$filename" : "$filename"}
 
       input:
-         set val(sample), file(pe_bam) from paired_bam
-         file frag_file from res_frag_file.collect()
+      set val(sample), file(pe_bam) from paired_bam
+      file frag_file from res_frag_file.collect()
 
       output:
-         set val(sample), file("*.validPairs") into valid_pairs
-         set val(sample), file("*.validPairs") into valid_pairs_4cool
-         set val(sample), file("*RSstat") into all_rsstat
+      set val(sample), file("*.validPairs") into valid_pairs
+      set val(sample), file("*.validPairs") into valid_pairs_4cool
+      set val(sample), file("*.DEPairs") into de_pairs
+      set val(sample), file("*.SCPairs") into sc_pairs
+      set val(sample), file("*.REPairs") into re_pairs
+      set val(sample), file("*.FiltPairs") into filt_pairs
+      set val(sample), file("*RSstat") into all_rsstat
 
       script:
-         if (params.splitFastq){
-      	    sample = sample.toString() - ~/(\.[0-9]+)$/
-         }
-
-         def opts = ""
-         if ("$params.min_cis_dist".isInteger()) opts="${opts} -d ${params.min_cis_dist}"
-         if ("$params.min_insert_size".isInteger()) opts="${opts} -s ${params.min_insert_size}"
-         if ("$params.max_insert_size".isInteger()) opts="${opts} -l ${params.max_insert_size}"
-         if ("$params.min_restriction_fragment_size".isInteger()) opts="${opts} -t ${params.min_restriction_fragment_size}"
-         if ("$params.max_restriction_fragment_size".isInteger()) opts="${opts} -m ${params.max_restriction_fragment_size}"
-
-         """
-         mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} ${opts}
-         """
+      if (params.split_fastq){
+         sample = sample.toString() - ~/(\.[0-9]+)$/
+      }
+
+      def opts = ""
+      if ("$params.min_cis_dist".isInteger()) opts="${opts} -d ${params.min_cis_dist}"
+      if ("$params.min_insert_size".isInteger()) opts="${opts} -s ${params.min_insert_size}"
+      if ("$params.max_insert_size".isInteger()) opts="${opts} -l ${params.max_insert_size}"
+      if ("$params.min_restriction_fragment_size".isInteger()) opts="${opts} -t ${params.min_restriction_fragment_size}"
+      if ("$params.max_restriction_fragment_size".isInteger()) opts="${opts} -m ${params.max_restriction_fragment_size}"
+      if (params.save_interaction_bam) opts="${opts} --sam"
+      prefix = pe_bam.toString() - ~/\.bam$/
+      """
+      mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} --all ${opts}
+      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      """
    }
 }
 else{
    process get_valid_interaction_dnase{
       tag "$sample"
+      label 'process_low'
       publishDir "${params.outdir}/hic_results/data", mode: 'copy',
    	      saveAs: {filename -> filename.indexOf("*stat") > 0 ? "stats/$filename" : "$filename"}
 
       input:
-         set val(sample), file(pe_bam) from paired_bam
+      set val(sample), file(pe_bam) from paired_bam
 
       output:
-         set val(sample), file("*.validPairs") into valid_pairs
-         set val(sample), file("*.validPairs") into valid_pairs_4cool
-         set val(sample), file("*RSstat") into all_rsstat
+      set val(sample), file("*.validPairs") into valid_pairs
+      set val(sample), file("*.validPairs") into valid_pairs_4cool
+      set val(sample), file("*RSstat") into all_rsstat
 
       script:
-         if (params.splitFastq){
-      	    sample = sample.toString() - ~/(\.[0-9]+)$/
-         }
-
-         def opts = ""
-         if ("$params.min_cis_dist".isInteger()) opts="${opts} -d ${params.min_cis_dist}"
-	 """
-	 mapped_2hic_dnase.py -r ${pe_bam} ${opts}
-         """
+      if (params.split_fastq){
+         sample = sample.toString() - ~/(\.[0-9]+)$/
+      }
+
+      def opts = ""
+      if ("$params.min_cis_dist".isInteger()) opts="${opts} -d ${params.min_cis_dist}"
+      prefix = pe_bam.toString() - ~/\.bam$/
+      """
+      mapped_2hic_dnase.py -r ${pe_bam} ${opts}
+      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      """
    }
 }
 
@@ -651,16 +690,17 @@ else{
 
 process remove_duplicates {
    tag "$sample"
+   label 'process_highmem'
    publishDir "${params.outdir}/hic_results/data", mode: 'copy',
    	      saveAs: {filename -> filename.indexOf("*stat") > 0 ? "stats/$sample/$filename" : "$filename"}
 
    input:
-     set val(sample), file(vpairs) from valid_pairs.groupTuple()
+   set val(sample), file(vpairs) from valid_pairs.groupTuple()
 
    output:
-     set val(sample), file("*.allValidPairs") into all_valid_pairs
-     set val(sample), file("*.allValidPairs") into all_valid_pairs_4cool
-     file("stats/") into all_mergestat
+   set val(sample), file("*.allValidPairs") into all_valid_pairs
+   set val(sample), file("*.allValidPairs") into all_valid_pairs_4cool
+   file("stats/") into all_mergestat
 
    script:
    if ( params.rm_dup ){
@@ -697,41 +737,41 @@ process remove_duplicates {
 
 process merge_sample {
    tag "$ext"
+   label 'process_low'
    publishDir "${params.outdir}/hic_results/stats/${sample}", mode: 'copy'
 
    input:
-     set val(prefix), file(fstat) from all_mapstat.groupTuple().concat(all_pairstat.groupTuple(), all_rsstat.groupTuple())
+   set val(prefix), file(fstat) from all_mapstat.groupTuple().concat(all_pairstat.groupTuple(), all_rsstat.groupTuple())
 
-  output:
-     file("mstats/") into all_mstats
+   output:
+   file("mstats/") into all_mstats
 
   script:
-     sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/
-     if ( (fstat =~ /.mapstat/) ){ ext = "mmapstat" }
-     if ( (fstat =~ /.pairstat/) ){ ext = "mpairstat" }
-     if ( (fstat =~ /.RSstat/) ){ ext = "mRSstat" }
-
-     """
-     mkdir -p mstats/${sample}
-     merge_statfiles.py -f ${fstat} > mstats/${sample}/${prefix}.${ext}
-     """
+  sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
+  if ( (fstat =~ /.mapstat/) ){ ext = "mmapstat" }
+  if ( (fstat =~ /.pairstat/) ){ ext = "mpairstat" }
+  if ( (fstat =~ /.RSstat/) ){ ext = "mRSstat" }
+  """
+  mkdir -p mstats/${sample}
+  merge_statfiles.py -f ${fstat} > mstats/${sample}/${prefix}.${ext}
+  """
 }
 
-
 process build_contact_maps{
    tag "$sample - $mres"
+   label 'process_highmem'
    publishDir "${params.outdir}/hic_results/matrix/raw", mode: 'copy'
 
    when:
-      !params.skip_maps
+   !params.skip_maps
 
    input:
-      set val(sample), file(vpairs), val(mres) from all_valid_pairs.combine(map_res)
-      file chrsize from chromosome_size.collect()
+   set val(sample), file(vpairs), val(mres) from all_valid_pairs.combine(map_res)
+   file chrsize from chromosome_size.collect()
 
    output:
-      file("*.matrix") into raw_maps
-      file "*.bed"
+   file("*.matrix") into raw_maps
+   file "*.bed"
 
    script:
    """
@@ -745,17 +785,18 @@ process build_contact_maps{
 
 process run_ice{
    tag "$rmaps"
+   label 'process_highmem'
    publishDir "${params.outdir}/hic_results/matrix/iced", mode: 'copy'
 
    when:
-      !params.skip_maps && !params.skip_ice
+   !params.skip_maps && !params.skip_ice
 
    input:
-      file(rmaps) from raw_maps
-      file "*.biases"
+   file(rmaps) from raw_maps
+   file "*.biases"
 
    output:
-      file("*iced.matrix") into iced_maps
+   file("*iced.matrix") into iced_maps
 
    script:
    prefix = rmaps.toString() - ~/(\.matrix)?$/
@@ -773,75 +814,73 @@ process run_ice{
  */
 process generate_cool{
    tag "$sample"
+   label 'process_medium'
    publishDir "${params.outdir}/export/cool", mode: 'copy'
 
    when:
-      !params.skip_cool
+   !params.skip_cool
 
    input:
-      set val(sample), file(vpairs) from all_valid_pairs_4cool
-      file chrsize from chromosome_size_cool.collect()
+   set val(sample), file(vpairs) from all_valid_pairs_4cool
+   file chrsize from chromosome_size_cool.collect()
 
    output:
-      file("*mcool") into cool_maps
+   file("*mcool") into cool_maps
 
    script:
    """
-   hicpro2higlass.sh -i $vpairs -r 5000 -c ${chrsize} -n
+   hicpro2higlass.sh -p ${task.cpus} -i $vpairs -r 5000 -c ${chrsize} -n
    """
 }
 
 
 /*
- * STEP 5 - MultiQC
+ * STEP 6 - MultiQC
  */
 process multiqc {
-    publishDir "${params.outdir}/MultiQC", mode: 'copy'
-
-    when:
-       !params.skip_multiqc
+   label 'process_low'
+   publishDir "${params.outdir}/MultiQC", mode: 'copy'
 
-    input:
-       file multiqc_config from ch_multiqc_config
-       file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
-       file ('software_versions/*') from software_versions_yaml
-       file workflow_summary from create_workflow_summary(summary)
+   when:
+   !params.skip_multiqc
 
-    output:
-       file "*multiqc_report.html" into multiqc_report
-       file "*_data"
+   input:
+   file multiqc_config from ch_multiqc_config
+   file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([])
+   file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
+   file ('software_versions/*') from software_versions_yaml
+   file workflow_summary from create_workflow_summary(summary)
 
-    script:
-    rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
-    rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''
+   output:
+   file "*multiqc_report.html" into multiqc_report
+   file "*_data"
 
-    """
-    multiqc -f $rtitle $rfilename --config $multiqc_config .
-    """
+   script:
+   rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
+   rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''
+   """
+   multiqc -f $rtitle $rfilename --config $multiqc_config .
+   """
 }
 
-
-
 /*
- * STEP 3 - Output Description HTML
+ * STEP 7 - Output Description HTML
  */
 process output_documentation {
-    publishDir "${params.outdir}/pipeline_info", mode: 'copy'
+   publishDir "${params.outdir}/pipeline_info", mode: 'copy'
 
-    input:
-    file output_docs from ch_output_docs
+   input:
+   file output_docs from ch_output_docs
 
-    output:
-    file "results_description.html"
+   output:
+   file "results_description.html"
 
-    script:
-    """
-    markdown_to_html.r $output_docs results_description.html
-    """
+   script:
+   """
+   markdown_to_html.py $output_docs -o results_description.html
+   """
 }
 
-
-
 /*
  * Completion e-mail notification
  */
@@ -850,8 +889,8 @@ workflow.onComplete {
 
     // Set up the e-mail variables
     def subject = "[nf-core/hic] Successful: $workflow.runName"
-    if(!workflow.success){
-      subject = "[nf-core/hic] FAILED: $workflow.runName"
+    if (!workflow.success) {
+        subject = "[nf-core/hic] FAILED: $workflow.runName"
     }
     def email_fields = [:]
     email_fields['version'] = workflow.manifest.version
@@ -869,10 +908,9 @@ workflow.onComplete {
     email_fields['summary']['Date Completed'] = workflow.complete
     email_fields['summary']['Pipeline script file path'] = workflow.scriptFile
     email_fields['summary']['Pipeline script hash ID'] = workflow.scriptId
-    if(workflow.repository) email_fields['summary']['Pipeline repository Git URL'] = workflow.repository
-    if(workflow.commitId) email_fields['summary']['Pipeline repository Git Commit'] = workflow.commitId
-    if(workflow.revision) email_fields['summary']['Pipeline Git branch/tag'] = workflow.revision
-    if(workflow.container) email_fields['summary']['Docker image'] = workflow.container
+    if (workflow.repository) email_fields['summary']['Pipeline repository Git URL'] = workflow.repository
+    if (workflow.commitId) email_fields['summary']['Pipeline repository Git Commit'] = workflow.commitId
+    if (workflow.revision) email_fields['summary']['Pipeline Git branch/tag'] = workflow.revision
     email_fields['summary']['Nextflow Version'] = workflow.nextflow.version
     email_fields['summary']['Nextflow Build'] = workflow.nextflow.build
     email_fields['summary']['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp
@@ -882,8 +920,8 @@ workflow.onComplete {
     def mqc_report = null
     try {
         if (workflow.success) {
-            mqc_report = multiqc_report.getVal()
-            if (mqc_report.getClass() == ArrayList){
+            mqc_report = ch_multiqc_report.getVal()
+            if (mqc_report.getClass() == ArrayList) {
                 log.warn "[nf-core/hic] Found multiple reports from process 'multiqc', will use only one"
                 mqc_report = mqc_report[0]
             }
@@ -892,6 +930,12 @@ workflow.onComplete {
         log.warn "[nf-core/hic] Could not attach MultiQC report to summary email"
     }
 
+    // Check if we are only sending emails on failure
+    email_address = params.email
+    if (!params.email && params.email_on_fail && !workflow.success) {
+        email_address = params.email_on_fail
+    }
+
     // Render the TXT template
     def engine = new groovy.text.GStringTemplateEngine()
     def tf = new File("$baseDir/assets/email_template.txt")
@@ -904,89 +948,89 @@ workflow.onComplete {
     def email_html = html_template.toString()
 
     // Render the sendmail template
-    def smail_fields = [ email: params.email, subject: subject, email_txt: email_txt, email_html: email_html, baseDir: "$baseDir", mqcFile: mqc_report, mqcMaxSize: params.maxMultiqcEmailFileSize.toBytes() ]
+    def smail_fields = [ email: email_address, subject: subject, email_txt: email_txt, email_html: email_html, baseDir: "$baseDir", mqcFile: mqc_report, mqcMaxSize: params.max_multiqc_email_size.toBytes() ]
     def sf = new File("$baseDir/assets/sendmail_template.txt")
     def sendmail_template = engine.createTemplate(sf).make(smail_fields)
     def sendmail_html = sendmail_template.toString()
 
     // Send the HTML e-mail
-    if (params.email) {
+    if (email_address) {
         try {
-          if( params.plaintext_email ){ throw GroovyException('Send plaintext e-mail, not HTML') }
-          // Try to send HTML e-mail using sendmail
-          [ 'sendmail', '-t' ].execute() << sendmail_html
-          log.info "[nf-core/hic] Sent summary e-mail to $params.email (sendmail)"
+            if (params.plaintext_email) { throw GroovyException('Send plaintext e-mail, not HTML') }
+            // Try to send HTML e-mail using sendmail
+            [ 'sendmail', '-t' ].execute() << sendmail_html
+            log.info "[nf-core/hic] Sent summary e-mail to $email_address (sendmail)"
         } catch (all) {
-          // Catch failures and try with plaintext
-          [ 'mail', '-s', subject, params.email ].execute() << email_txt
-          log.info "[nf-core/hic] Sent summary e-mail to $params.email (mail)"
+            // Catch failures and try with plaintext
+            [ 'mail', '-s', subject, email_address ].execute() << email_txt
+            log.info "[nf-core/hic] Sent summary e-mail to $email_address (mail)"
         }
     }
 
     // Write summary e-mail HTML to a file
-    def output_d = new File( "${params.outdir}/pipeline_info/" )
-    if( !output_d.exists() ) {
-      output_d.mkdirs()
+    def output_d = new File("${params.outdir}/pipeline_info/")
+    if (!output_d.exists()) {
+        output_d.mkdirs()
     }
-    def output_hf = new File( output_d, "pipeline_report.html" )
+    def output_hf = new File(output_d, "pipeline_report.html")
     output_hf.withWriter { w -> w << email_html }
-    def output_tf = new File( output_d, "pipeline_report.txt" )
+    def output_tf = new File(output_d, "pipeline_report.txt")
     output_tf.withWriter { w -> w << email_txt }
 
-    c_reset = params.monochrome_logs ? '' : "\033[0m";
-    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
     c_green = params.monochrome_logs ? '' : "\033[0;32m";
+    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
     c_red = params.monochrome_logs ? '' : "\033[0;31m";
+    c_reset = params.monochrome_logs ? '' : "\033[0m";
 
-    if (workflow.stats.ignoredCountFmt > 0 && workflow.success) {
-      log.info "${c_purple}Warning, pipeline completed, but with errored process(es) ${c_reset}"
-      log.info "${c_red}Number of ignored errored process(es) : ${workflow.stats.ignoredCountFmt} ${c_reset}"
-      log.info "${c_green}Number of successfully ran process(es) : ${workflow.stats.succeedCountFmt} ${c_reset}"
+    if (workflow.stats.ignoredCount > 0 && workflow.success) {
+        log.info "-${c_purple}Warning, pipeline completed, but with errored process(es) ${c_reset}-"
+        log.info "-${c_red}Number of ignored errored process(es) : ${workflow.stats.ignoredCount} ${c_reset}-"
+        log.info "-${c_green}Number of successfully ran process(es) : ${workflow.stats.succeedCount} ${c_reset}-"
     }
 
-    if(workflow.success){
-        log.info "${c_purple}[nf-core/hic]${c_green} Pipeline completed successfully${c_reset}"
+    if (workflow.success) {
+        log.info "-${c_purple}[nf-core/hic]${c_green} Pipeline completed successfully${c_reset}-"
     } else {
         checkHostname()
-        log.info "${c_purple}[nf-core/hic]${c_red} Pipeline completed with errors${c_reset}"
+        log.info "-${c_purple}[nf-core/hic]${c_red} Pipeline completed with errors${c_reset}-"
     }
 
 }
 
 
-def nfcoreHeader(){
+def nfcoreHeader() {
     // Log colors ANSI codes
-    c_reset = params.monochrome_logs ? '' : "\033[0m";
-    c_dim = params.monochrome_logs ? '' : "\033[2m";
     c_black = params.monochrome_logs ? '' : "\033[0;30m";
-    c_green = params.monochrome_logs ? '' : "\033[0;32m";
-    c_yellow = params.monochrome_logs ? '' : "\033[0;33m";
     c_blue = params.monochrome_logs ? '' : "\033[0;34m";
-    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
     c_cyan = params.monochrome_logs ? '' : "\033[0;36m";
+    c_dim = params.monochrome_logs ? '' : "\033[2m";
+    c_green = params.monochrome_logs ? '' : "\033[0;32m";
+    c_purple = params.monochrome_logs ? '' : "\033[0;35m";
+    c_reset = params.monochrome_logs ? '' : "\033[0m";
     c_white = params.monochrome_logs ? '' : "\033[0;37m";
+    c_yellow = params.monochrome_logs ? '' : "\033[0;33m";
 
-    return """    ${c_dim}----------------------------------------------------${c_reset}
+    return """    -${c_dim}--------------------------------------------------${c_reset}-
                                             ${c_green},--.${c_black}/${c_green},-.${c_reset}
     ${c_blue}        ___     __   __   __   ___     ${c_green}/,-._.--~\'${c_reset}
     ${c_blue}  |\\ | |__  __ /  ` /  \\ |__) |__         ${c_yellow}}  {${c_reset}
     ${c_blue}  | \\| |       \\__, \\__/ |  \\ |___     ${c_green}\\`-._,-`-,${c_reset}
                                             ${c_green}`._,._,\'${c_reset}
     ${c_purple}  nf-core/hic v${workflow.manifest.version}${c_reset}
-    ${c_dim}----------------------------------------------------${c_reset}
+    -${c_dim}--------------------------------------------------${c_reset}-
     """.stripIndent()
 }
 
-def checkHostname(){
+def checkHostname() {
     def c_reset = params.monochrome_logs ? '' : "\033[0m"
     def c_white = params.monochrome_logs ? '' : "\033[0;37m"
     def c_red = params.monochrome_logs ? '' : "\033[1;91m"
     def c_yellow_bold = params.monochrome_logs ? '' : "\033[1;93m"
-    if(params.hostnames){
+    if (params.hostnames) {
         def hostname = "hostname".execute().text.trim()
         params.hostnames.each { prof, hnames ->
             hnames.each { hname ->
-                if(hostname.contains(hname) && !workflow.profile.contains(prof)){
+                if (hostname.contains(hname) && !workflow.profile.contains(prof)) {
                     log.error "====================================================\n" +
                             "  ${c_red}WARNING!${c_reset} You are running with `-profile $workflow.profile`\n" +
                             "  but your machine hostname is ${c_white}'$hostname'${c_reset}\n" +
diff --git a/nextflow.config b/nextflow.config
index 356f20058f0aa048851d0fd965078417564701b3..f7a5af77d3153338ba5b149f37262cac2439cd9c 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -9,8 +9,10 @@
 params {
 
   // Workflow flags
-  // Specify your pipeline's command line flags
-  reads = "*{1,2}.fastq.gz"
+  reads = "data/*{1,2}.fastq.gz"
+  single_end = false
+
   outdir = './results'
   genome = false
   readPaths = false
@@ -20,32 +22,49 @@ params {
   skip_ice = false
   skip_cool = false
   skip_multiqc = false
+  save_reference = false
+  save_interaction_bam = false
+  save_aligned_intermediates = false
+
   dnase = false
+  rm_dup = false
+  rm_singleton = false
+  rm_multi = false
+  min_restriction_fragment_size = false
+  max_restriction_fragment_size = false
+  min_insert_size = false
+  max_insert_size = false
+  min_cis_dist = false
 
   // Boilerplate options
+  multiqc_config = false
   name = false
-  multiqc_config = "$baseDir/assets/multiqc_config.yaml"
   email = false
-  maxMultiqcEmailFileSize = 25.MB
+  email_on_fail = false
+  max_multiqc_email_size = 25.MB
   plaintext_email = false
   monochrome_logs = false
   help = false
-  igenomes_base = "./iGenomes"
+  igenomes_base = 's3://ngi-igenomes/igenomes/'
   tracedir = "${params.outdir}/pipeline_info"
-  awsqueue = false
-  awsregion = 'eu-west-1'
-  igenomesIgnore = false
+  igenomes_ignore = false
+
   custom_config_version = 'master'
   custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
   hostnames = false
   config_profile_description = false
   config_profile_contact = false
   config_profile_url = false
+
+  // Defaults only, expecting to be overwritten
+  max_memory = 24.GB
+  max_cpus = 8
+  max_time = 240.h
 }
 
 // Container slug. Stable releases should specify release tag!
 // Developmental code should specify :dev
-process.container = 'nfcore/hic:1.0.0'
+process.container = 'nfcore/hic:1.2.0'
 
 // Load base.config by default for all pipelines
 includeConfig 'conf/base.config'
@@ -62,19 +81,33 @@ includeConfig 'conf/hicpro.config'
 
 // Create profiles
 profiles {
-  awsbatch { includeConfig 'conf/awsbatch.config' }
   conda { process.conda = "$baseDir/environment.yml" }
   debug { process.beforeScript = 'echo $HOSTNAME' }
-  docker { docker.enabled = true }
-  singularity { singularity.enabled = true }
+  docker {
+    docker.enabled = true
+    // Avoid this error:
+    //   WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.
+    // Testing this in nf-core after discussion here https://github.com/nf-core/tools/pull/351
+    // once this is established and works well, nextflow might implement this behavior as new default.
+    docker.runOptions = '-u \$(id -u):\$(id -g)'
+  }
+  singularity {
+    singularity.enabled = true
+    singularity.autoMounts = true
+  }
   test { includeConfig 'conf/test.config' }
 }
 
 // Load igenomes.config if required
-if(!params.igenomesIgnore){
+if (!params.igenomes_ignore) {
   includeConfig 'conf/igenomes.config'
 }
 
+// Export this variable to prevent local Python libraries from conflicting with those in the container
+env {
+  PYTHONNOUSERSITE = 1
+}
+
 // Capture exit codes from upstream processes when piping
 process.shell = ['/bin/bash', '-euo', 'pipefail']
 
@@ -101,16 +134,16 @@ manifest {
   homePage = 'https://github.com/nf-core/hic'
   description = 'Analysis of Chromosome Conformation Capture data (Hi-C)'
   mainScript = 'main.nf'
-  nextflowVersion = '>=0.32.0'
-  version = '1.0.0'
+  nextflowVersion = '>=19.10.0'
+  version = '1.2.0'
 }
 
 // Function to ensure that resource requirements don't go beyond
 // a maximum limit
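+// Illustrative usage from a process scope (cf. conf/base.config; values are examples only):
+//   memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+//   time   = { check_max( 8.h * task.attempt, 'time' ) }
+//   cpus   = { check_max( 2 * task.attempt, 'cpus' ) }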
 def check_max(obj, type) {
-  if(type == 'memory'){
+  if (type == 'memory') {
     try {
-      if(obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
+      if (obj.compareTo(params.max_memory as nextflow.util.MemoryUnit) == 1)
         return params.max_memory as nextflow.util.MemoryUnit
       else
         return obj
@@ -118,9 +151,9 @@ def check_max(obj, type) {
       println "   ### ERROR ###   Max memory '${params.max_memory}' is not valid! Using default value: $obj"
       return obj
     }
-  } else if(type == 'time'){
+  } else if (type == 'time') {
     try {
-      if(obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
+      if (obj.compareTo(params.max_time as nextflow.util.Duration) == 1)
         return params.max_time as nextflow.util.Duration
       else
         return obj
@@ -128,7 +161,7 @@ def check_max(obj, type) {
       println "   ### ERROR ###   Max time '${params.max_time}' is not valid! Using default value: $obj"
       return obj
     }
-  } else if(type == 'cpus'){
+  } else if (type == 'cpus') {
     try {
       return Math.min( obj, params.max_cpus as int )
     } catch (all) {