diff --git a/.editorconfig b/.editorconfig
index b6b3190776e8d7f8894ed6484494018355814fc6..43c7138733277a83ffc7e788e0283df17f6ccf71 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -22,3 +22,11 @@ indent_size = unset
 
 [/assets/email*]
 indent_size = unset
+
+# C++ compiled code
+[/bin/cutsite_trimming]
+end_of_line = unset
+insert_final_newline = unset
+trim_trailing_whitespace = unset
+indent_style = unset
+indent_size = unset
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index 014f094c61261d53c41ebe43d9a6c3e786d6a3bb..2b99a42b8738de2bf1f214c5f3d267915b5178ac 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -14,10 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Launch workflow via tower
-        uses: seqeralabs/action-tower-launch@v1
-        # TODO nf-core: You can customise AWS full pipeline tests as required
-        # Add full size test data (but still relatively small datasets for few samples)
-        # on the `test_full.config` test runs with only one set of parameters
+        uses: nf-core/tower-action@v3
         with:
           workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }}
           access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
@@ -27,7 +24,7 @@ jobs:
             {
               "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-${{ github.sha }}"
             }
-          profiles: test_full,aws_tower
+          profiles: test_full,public_aws_ecr
       - uses: actions/upload-artifact@v3
         with:
           name: Tower debug log file
diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml
index e7311630f36239a1b58fd7a88c3dd1a07c2f2ea6..23c4973be1dea45f100e25c3f1b66486274b7f56 100644
--- a/.github/workflows/awstest.yml
+++ b/.github/workflows/awstest.yml
@@ -22,7 +22,7 @@ jobs:
             {
               "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/hic/results-test-${{ github.sha }}"
             }
-          profiles: test,aws_tower
+          profiles: test,public_aws_ecr
       - uses: actions/upload-artifact@v3
         with:
           name: Tower debug log file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cafa44a9680031ad06866987ee8e5e74a23babdd..83b111b5d502d72f175c685c5af89e786a53b320 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,13 +31,10 @@ jobs:
         uses: actions/checkout@v3
 
       - name: Install Nextflow
-        uses: nf-core/setup-nextflow@v1
+        uses: nf-core/setup-nextflow@v1.3.0
         with:
           version: "${{ matrix.NXF_VER }}"
 
       - name: Run pipeline with test data
-        # TODO nf-core: You can customise CI pipeline run tests as required
-        # For example: adding multiple test runs with different parameters
-        # Remember that you can parallelise this by using strategy.matrix
         run: |
           nextflow run ${GITHUB_WORKSPACE} -profile test,docker --outdir ./results
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a475fb42f5c0962af397e653120a04587c818c9..ea99e6775dda65525ef2970bf0e4cd2a8bb7ba45 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,14 +3,160 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## v2.1.0dev - [date]
+## v2.1.0dev
 
-Initial release of nf-core/hic, created with the [nf-core](https://nf-co.re/) template.
+### `Added`
+
+- Added public_aws_ecr profile for using containers stored on ECR.
+
+### `Fixed`
+
+## v2.0.0 - 2023-01-12
 
 ### `Added`
 
+- DSL2 version of nf-core-hic pipeline
+- Add full test dataset (#80)
+- Replace local modules by the cooler nf-core module
+
+### `Fixed`
+
+- Fix error in the Arima preset (#127)
+
+## v1.3.1 - 2021-09-25
+
+### `Fixed`
+
+- Fix bug in conda environment for cooltools (#109)
+
+## v1.3.0 - 2021-05-22
+
+- Change the `/tmp/` folder to `./tmp/` folder so that all tmp files are now in the work directory (#24)
+- Add `--hicpro_maps` options to generate the raw and normalized HiC-Pro maps. The default is now to use cooler
+- Add chromosome compartments calling with cooltools (#53)
+- Add HiCExplorer distance decay quality control (#54)
+- Add HiCExplorer TADs calling (#55)
+- Add insulation score TADs calling (#55)
+- Generate cooler/txt contact maps
+- Normalize Hi-C data with cooler instead of iced
+- New `--digestion` parameter to automatically set the restriction_site and ligation_site motifs
+- New `--keep_multi` and `--keep_dups` options. Default: false
+- Template update for nf-core/tools
+- Minor fix to summary log messages in pipeline header
+
 ### `Fixed`
 
-### `Dependencies`
+- Fix bug in stats reports which were not all correctly exported in the results folder
+- Fix recurrent bug in input file extension (#86)
+- Fix bug in `--bin_size` parameter (#85)
+- `--min_mapq` is ignored if `--keep_multi` is used
 
 ### `Deprecated`
+
+- `--rm_dup` and `--rm_multi` are replaced by `--keep_dups` and `--keep_multi`
+
+## v1.2.2 - 2020-09-02
+
+### `Added`
+
+- Template update for nf-core/tools v1.10.2
+- Add the `--fastq_chunks_size` to specify the number of reads per chunks if split_fastq is true
+
+### `Fixed`
+
+- Bug in `--split_fastq` option not recognized
+
+## v1.2.1 - 2020-07-06
+
+### `Fixed`
+
+- Fix issue with `--fasta` option and `.fa` extension (#66)
+
+## v1.2.0 - 2020-06-18
+
+### `Added`
+
+- Bump v1.2.0
+- Merge template nf-core 1.9
+- Move some options to camel_case
+- Update python scripts for python3
+- Update conda environment file
+  - python base `2.7.15` > `3.7.6`
+  - pip `19.1` > `20.0.1`
+  - scipy `1.2.1` > `1.4.1`
+  - numpy `1.16.3` > `1.18.1`
+  - bx-python `0.8.2` > `0.8.8`
+  - pysam `0.15.2` > `0.15.4`
+  - cooler `0.8.5` > `0.8.6`
+  - multiqc `1.7` > `1.8`
+  - iced `0.5.1` > `0.5.6`
+  - _*New*_ pymdown-extensions `7.1`
+  - _*New*_ hicexplorer `3.4.3`
+  - _*New*_ bioconductor-hitc `1.32.0`
+  - _*New*_ r-optparse `1.6.6`
+  - _*New*_ ucsc-bedgraphtobigwig `377`
+  - _*New*_ cython `0.29.19`
+  - _*New*_ cooltools `0.3.2`
+  - _*New*_ fanc `0.8.30`
+  - _*Removed*_ r-markdown
+
+### `Fixed`
+
+- Fix error in doc for Arima kit usage
+- Sort output of `get_valid_interaction` process as the input files of `remove_duplicates`
+  are expected to be sorted (sort -m)
+
+### `Deprecated`
+
+- Command line options converted to `camel_case`:
+  - `--skipMaps` > `--skip_maps`
+  - `--skipIce` > `--skip_ice`
+  - `--skipCool` > `--skip_cool`
+  - `--skipMultiQC` > `--skip_multiqc`
+  - `--saveReference` > `--save_reference`
+  - `--saveAlignedIntermediates` > `--save_aligned_intermediates`
+  - `--saveInteractionBAM` > `--save_interaction_bam`
+
+## v1.1.1 - 2020-04-02
+
+### `Fixed`
+
+- Fix bug in tag. Remove '['
+
+## v1.1.0 - 2019-10-15
+
+### `Added`
+
+- Update hicpro2higlass with `-p` parameter
+- Support 'N' base motif in restriction/ligation sites
+- Support multiple restriction enzymes/ligation sites (comma separated) ([#31](https://github.com/nf-core/hic/issues/31))
+- Add --saveInteractionBAM option
+- Add DOI ([#29](https://github.com/nf-core/hic/issues/29))
+- Update manual ([#28](https://github.com/nf-core/hic/issues/28))
+
+### `Fixed`
+
+- Fix bug for reads extension `_1`/`_2` ([#30](https://github.com/nf-core/hic/issues/30))
+
+## v1.0 - [2019-05-06]
+
+Initial release of nf-core/hic, created with the [nf-core](http://nf-co.re/) template.
+
+### `Added`
+
+First version of nf-core Hi-C pipeline which is a Nextflow implementation of
+the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/).
+Note that not all HiC-Pro functionalities are implemented yet.
+The current version supports most protocols including Hi-C, in situ Hi-C,
+DNase Hi-C, Micro-C, capture-C or HiChip data.
+
+In summary, this version allows :
+
+- Automatic detection and generation of annotation files based on igenomes
+  if not provided.
+- Two-steps alignment of raw sequencing reads
+- Reads filtering and detection of valid interaction products
+- Generation of raw contact matrices for a set of resolutions
+- Normalization of the contact maps using the ICE algorithm
+- Generation of cooler file for visualization on [higlass](https://higlass.io/)
+- Quality report based on HiC-Pro MultiQC module
diff --git a/CITATIONS.md b/CITATIONS.md
index 07d08ce645d96440e9ae8f094c69800e36448574..0313a1a90267497fb6e5ccc8788838163da43100 100644
--- a/CITATIONS.md
+++ b/CITATIONS.md
@@ -1,5 +1,9 @@
 # nf-core/hic: Citations
 
+## [HiC-Pro](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
+
+> Servant N, Varoquaux N, Lajoie BR, Viara E, Chen C, Vert JP, Dekker J, Heard E, Barillot E. Genome Biology 2015, 16:259 doi: [10.1186/s13059-015-0831-x](https://dx.doi.org/10.1186/s13059-015-0831-x)
+
 ## [nf-core](https://pubmed.ncbi.nlm.nih.gov/32055031/)
 
 > Ewels PA, Peltzer A, Fillinger S, Patel H, Alneberg J, Wilm A, Garcia MU, Di Tommaso P, Nahnsen S. The nf-core framework for community-curated bioinformatics pipelines. Nat Biotechnol. 2020 Mar;38(3):276-278. doi: 10.1038/s41587-020-0439-x. PubMed PMID: 32055031.
diff --git a/README.md b/README.md
index 354b9804d3eb5745618f047420de1c45f62068d4..be26c0717863fb727d11ac636e7b7c3b3b90fa1e 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # ![nf-core/hic](docs/images/nf-core-hic_logo_light.png#gh-light-mode-only) ![nf-core/hic](docs/images/nf-core-hic_logo_dark.png#gh-dark-mode-only)
 
-[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/hic/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)
+[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/hic/results)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.2669512-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.2669512)
 
 [![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)
 [![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)
@@ -12,20 +12,29 @@
 
 ## Introduction
 
-**nf-core/hic** is a bioinformatics pipeline that ...
+**nf-core/hic** is a bioinformatics best-practice analysis pipeline for Analysis of Chromosome Conformation Capture data (Hi-C).
 
-<!-- TODO nf-core:
-   Complete this sentence with a 2-3 sentence summary of what types of data the pipeline ingests, a brief overview of the
-   major pipeline sections and the types of output it produces. You're giving an overview to someone new
-   to nf-core here, in 15-20 seconds. For an example, see https://github.com/nf-core/rnaseq/blob/master/README.md#introduction
--->
+The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!
 
-<!-- TODO nf-core: Include a figure that guides the user through the major workflow steps. Many nf-core
-     workflows use the "tube map" design for that. See https://nf-co.re/docs/contributing/design_guidelines#examples for examples.   -->
-<!-- TODO nf-core: Fill in short bullet-pointed list of the default steps in the pipeline -->
+On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/hic/results).
+
+## Pipeline summary
 
 1. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))
-2. Present QC for raw reads ([`MultiQC`](http://multiqc.info/))
+2. Hi-C data processing
+   1. [`HiC-Pro`](https://github.com/nservant/HiC-Pro)
+      1. Mapping using a two steps strategy to rescue reads spanning the ligation
+         sites ([`bowtie2`](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml))
+      2. Detection of valid interaction products
+      3. Duplicates removal
+      4. Generate raw and normalized contact maps ([`iced`](https://github.com/hiclib/iced))
+3. Create genome-wide contact maps at various resolutions ([`cooler`](https://github.com/open2c/cooler))
+4. Contact maps normalization using balancing algorithm ([`cooler`](https://github.com/open2c/cooler))
+5. Export to various contact maps formats ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`cooler`](https://github.com/open2c/cooler))
+6. Quality controls ([`HiC-Pro`](https://github.com/nservant/HiC-Pro), [`HiCExplorer`](https://github.com/deeptools/HiCExplorer))
+7. Compartments calling ([`cooltools`](https://cooltools.readthedocs.io/en/latest/))
+8. TADs calling ([`HiCExplorer`](https://github.com/deeptools/HiCExplorer), [`cooltools`](https://cooltools.readthedocs.io/en/latest/))
+9. Quality control report ([`MultiQC`](https://multiqc.info/))
 
 ## Usage
 
@@ -34,8 +43,6 @@
 > to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)
 > with `-profile test` before running the workflow on actual data.
 
-<!-- TODO nf-core: Describe the minimum required steps to execute the pipeline, e.g. how to prepare samplesheets.
-     Explain what rows and columns represent. For instance (please edit as appropriate):
 
 First, prepare a samplesheet with your input data that looks as follows:
 
@@ -43,21 +50,17 @@ First, prepare a samplesheet with your input data that looks as follows:
 
 ```csv
 sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
+HIC_ES_4,SRR5339783_1.fastq.gz,SRR5339783_2.fastq.gz
 ```
 
-Each row represents a fastq file (single-end) or a pair of fastq files (paired end).
-
--->
-
+Each row represents a pair of fastq files (paired end).
 Now, you can run the pipeline using:
 
-<!-- TODO nf-core: update the following command to include all required parameters for a minimal example -->
-
 ```bash
 nextflow run nf-core/hic \
    -profile <docker/singularity/.../institute> \
    --input samplesheet.csv \
+   --genome GRCh37 \
    --outdir <OUTDIR>
 ```
 
@@ -78,10 +81,6 @@ For more details about the output files and reports, please refer to the
 
 nf-core/hic was originally written by Nicolas Servant.
 
-We thank the following people for their extensive assistance in the development of this pipeline:
-
-<!-- TODO nf-core: If applicable, make list of people who have also contributed -->
-
 ## Contributions and Support
 
 If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).
@@ -90,10 +89,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `#
 
 ## Citations
 
-<!-- TODO nf-core: Add citation for pipeline after first release. Uncomment lines below and update Zenodo doi and badge at the top of this file. -->
-<!-- If you use  nf-core/hic for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX) -->
-
-<!-- TODO nf-core: Add bibliography of tools and data used in your pipeline -->
+If you use nf-core/hic for your analysis, please cite it using the following doi: doi: [10.5281/zenodo.2669512](https://doi.org/10.5281/zenodo.2669512)
 
 An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.
 
diff --git a/assets/methods_description_template.yml b/assets/methods_description_template.yml
index 5b51ef5abc4fbe18bf0da19167f4d0506e14d0a2..2f0a30855b2ab842a1f5b16cb9f5ce8437f96dbd 100644
--- a/assets/methods_description_template.yml
+++ b/assets/methods_description_template.yml
@@ -3,7 +3,7 @@ description: "Suggested text and references to use when describing pipeline usag
 section_name: "nf-core/hic Methods Description"
 section_href: "https://github.com/nf-core/hic"
 plot_type: "html"
-## TODO nf-core: Update the HTML below to your prefered methods description, e.g. add publication citation for this pipeline
+## nf-core: Update the HTML below to your preferred methods description, e.g. add publication citation for this pipeline
 ## You inject any metadata in the Nextflow '${workflow}' object
 data: |
   <h4>Methods</h4>
@@ -12,6 +12,7 @@ data: |
   <pre><code>${workflow.commandLine}</code></pre>
   <h4>References</h4>
   <ul>
+    <li>Servant, N., Ewels, P. A., Peltzer, A., Garcia, M. U. (2021) nf-core/hic. Zenodo. https://doi.org/10.5281/zenodo.2669512</li>
     <li>Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316-319. <a href="https://doi.org/10.1038/nbt.3820">https://doi.org/10.1038/nbt.3820</a></li>
     <li>Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., & Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature Biotechnology, 38(3), 276-278. <a href="https://doi.org/10.1038/s41587-020-0439-x">https://doi.org/10.1038/s41587-020-0439-x</a></li>
   </ul>
diff --git a/assets/samplesheet.csv b/assets/samplesheet.csv
index 5f653ab7bfc86c905b720d2bb8708646bb66366e..e699919c0e9610e4082734cdd164b3629cb8c4a2 100644
--- a/assets/samplesheet.csv
+++ b/assets/samplesheet.csv
@@ -1,3 +1,2 @@
 sample,fastq_1,fastq_2
-SAMPLE_PAIRED_END,/path/to/fastq/files/AEG588A1_S1_L002_R1_001.fastq.gz,/path/to/fastq/files/AEG588A1_S1_L002_R2_001.fastq.gz
-SAMPLE_SINGLE_END,/path/to/fastq/files/AEG588A4_S4_L003_R1_001.fastq.gz,
+SRR4292758,https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R1.fastq.gz,https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R2.fastq.gz
diff --git a/bin/build_matrix b/bin/build_matrix
new file mode 100755
index 0000000000000000000000000000000000000000..15aa38e07e09efee5ed6c688f90ae0af21365393
Binary files /dev/null and b/bin/build_matrix differ
diff --git a/bin/check_samplesheet.py b/bin/check_samplesheet.py
index 4a758fe0036e6de4ce523890164d21ecbbfc56aa..dde3baaa26969f03313e3d6070633770def3c738 100755
--- a/bin/check_samplesheet.py
+++ b/bin/check_samplesheet.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 
-
 """Provide a command line tool to validate and transform tabular samplesheets."""
 
 
@@ -127,7 +126,7 @@ class RowChecker:
         for row in self.modified:
             sample = row[self._sample_col]
             seen[sample] += 1
-            row[self._sample_col] = f"{sample}_T{seen[sample]}"
+            ##row[self._sample_col] = f"{sample}_T{seen[sample]}"
 
 
 def read_head(handle, num_lines=10):
diff --git a/bin/cutsite_trimming b/bin/cutsite_trimming
new file mode 100755
index 0000000000000000000000000000000000000000..0edd84b91b5bdaa96e3cd6c33bb16e13601ce6d9
Binary files /dev/null and b/bin/cutsite_trimming differ
diff --git a/bin/digest_genome.py b/bin/digest_genome.py
new file mode 100755
index 0000000000000000000000000000000000000000..9f05b45b828e81abe35c146cc4b598334cf43916
--- /dev/null
+++ b/bin/digest_genome.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nelle Varoquaux, Nicolas Servant
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to extract restriction fragment from a fasta file and output a BED file
+"""
+
+import argparse
+import re
+import os
+import sys
+import numpy as np
+
+RE_cutsite = {"mboi": ["^GATC"], "dpnii": ["^GATC"], "bglii": ["A^GATCT"], "hindiii": ["A^AGCTT"]}
+
+
+def find_re_sites(filename, sequences, offset):
+    with open(filename, "r") as infile:
+        chr_id = None
+        big_str = ""
+        indices = []
+        all_indices = []
+        contig_names = []
+        c = 0
+        for line in infile:
+            c += 1
+            if line.startswith(">"):
+                print("{}...".format(line.split()[0][1:]))
+                # If this is not the first chromosome, find the indices and append
+                # them to the list
+                if chr_id is not None:
+                    for rs in range(len(sequences)):
+                        pattern = "(?={})".format(sequences[rs].lower())
+                        indices += [m.start() + offset[rs] for m in re.finditer(pattern, big_str)]
+                    indices.sort()
+                    all_indices.append(indices)
+                    indices = []
+
+                # This is a new chromosome. Empty the sequence string, and add the
+                # correct chrom id
+                big_str = ""
+                chr_id = line.split()[0][1:]
+                if chr_id in contig_names:
+                    print("The fasta file contains several instance of {}. Exit.".format(chr_id))
+                    sys.exit(-1)
+                contig_names.append(chr_id)
+            else:
+                # As long as we don't change chromosomes, continue reading the
+                # file, and appending the sequences
+                big_str += line.lower().strip()
+        # Add the indices for the last chromosome
+        for rs in range(len(sequences)):
+            pattern = "(?={})".format(sequences[rs].lower())
+            indices += [m.start() + offset[rs] for m in re.finditer(pattern, big_str)]
+        indices.sort()
+        all_indices.append(indices)
+
+    return contig_names, all_indices
+
+
+def find_chromsomose_lengths(reference_filename):
+    chromosome_lengths = []
+    chromosome_names = []
+    length = None
+    with open(reference_filename, "r") as infile:
+        for line in infile:
+            if line.startswith(">"):
+                chromosome_names.append(line[1:].strip())
+                if length is not None:
+                    chromosome_lengths.append(length)
+                length = 0
+            else:
+                length += len(line.strip())
+        chromosome_lengths.append(length)
+    return chromosome_names, np.array(chromosome_lengths)
+
+
+def replaceN(cs):
+    npos = int(cs.find("N"))
+    cseql = []
+    if npos != -1:
+        for nuc in ["A", "C", "G", "T"]:
+            tmp = cs.replace("N", nuc, 1)
+            tmpl = replaceN(tmp)
+            if type(tmpl) == list:
+                cseql = cseql + tmpl
+            else:
+                cseql.append(tmpl)
+    else:
+        cseql.append(cs)
+    return cseql
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("fastafile")
+    parser.add_argument(
+        "-r",
+        "--restriction_sites",
+        dest="res_sites",
+        nargs="+",
+        help=(
+            "The cutting position has to be specified using "
+            "'^'. For instance, -r A^AGCTT for HindIII "
+            "digestion. Several restriction enzyme can be "
+            "specified."
+        ),
+    )
+    parser.add_argument("-o", "--out", default=None)
+    args = parser.parse_args()
+
+    filename = args.fastafile
+    out = args.out
+
+    # Split restriction sites if comma-separated
+    cutsites = []
+    for s in args.res_sites:
+        for m in s.split(","):
+            cutsites.append(m)
+
+    # process args and get restriction enzyme sequences
+    sequences = []
+    offset = []
+    for cs in cutsites:
+        if cs.lower() in RE_cutsite:
+            cseq = "".join(RE_cutsite[cs.lower()])
+        else:
+            cseq = cs
+
+        offpos = int(cseq.find("^"))
+        if offpos == -1:
+            print(
+                "Unable to detect offset for {}. Please, use '^' to specify the cutting position,\
+                   i.e A^GATCT for HindIII digestion.".format(
+                    cseq
+                )
+            )
+            sys.exit(-1)
+
+        for nuc in list(set(cs)):
+            if nuc not in ["A", "T", "G", "C", "N", "^"]:
+                print("Find unexpected character ['{}']in restriction motif".format(nuc))
+                print("Note that multiple motifs should be separated by a space (not a comma !)")
+
+                sys.exit(-1)
+
+        offset.append(offpos)
+        sequences.append(re.sub("\^", "", cseq))
+
+    # replace all N in restriction motif
+    sequences_without_N = []
+    offset_without_N = []
+    for rs in range(len(sequences)):
+        nrs = replaceN(sequences[rs])
+        sequences_without_N = sequences_without_N + nrs
+        offset_without_N = offset_without_N + [offset[rs]] * len(nrs)
+
+    sequences = sequences_without_N
+    offset = offset_without_N
+
+    if out is None:
+        out = os.path.splitext(filename)[0] + "_fragments.bed"
+
+    print("Analyzing", filename)
+    print("Restriction site(s)", ",".join(sequences))
+    print("Offset(s)", ",".join(str(x) for x in offset))
+
+    # Read fasta file and look for rs per chromosome
+    contig_names, all_indices = find_re_sites(filename, sequences, offset=offset)
+    _, lengths = find_chromsomose_lengths(filename)
+
+    valid_fragments = []
+    for i, indices in enumerate(all_indices):
+        valid_fragments_chr = np.concatenate(
+            [np.concatenate([[0], indices])[:, np.newaxis], np.concatenate([indices, [lengths[i]]])[:, np.newaxis]],
+            axis=1,
+        )
+        valid_fragments.append(valid_fragments_chr)
+
+    # Write results
+    print("Writing to {} ...".format(out))
+    with open(out, "w") as outfile:
+        for chrom_name, indices in zip(contig_names, valid_fragments):
+            frag_id = 0
+            for begin, end in indices:
+                # allow to remove cases where the enzyme cut at
+                # the first position of the chromosome
+                if end > begin:
+                    frag_id += 1
+                    frag_name = "HIC_{}_{}".format(str(chrom_name), int(frag_id))
+                    outfile.write(
+                        "{}\t{}\t{}\t{}\t0\t+\n".format(str(chrom_name), int(begin), int(end), str(frag_name))
+                    )
diff --git a/bin/hicpro_merge_validpairs.sh b/bin/hicpro_merge_validpairs.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e6c5200bb9e09fc9b21302c6e6aae56a5468276a
--- /dev/null
+++ b/bin/hicpro_merge_validpairs.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+set -e
+
+##
+## HiC-Pro
+## Internal function
+## Merge valid interactions files and remove duplicates
+##
+
+rmDup=0
+prefix=""
+while getopts ":dp:" opt; do
+    case "$opt" in
+        d) rmDup=1 ;;
+        p) prefix=$OPTARG ;;
+    esac
+done
+shift $(( OPTIND - 1 ))
+
+vpairs="$@"
+vpairs_sorted=$(echo $vpairs | sed -e 's/validPairs/sorted.validPairs/g')
+
+mkdir -p ./tmp/
+
+if [[ ${rmDup} == 1 ]]; then
+    ## Sort individual validPairs files
+    fcounts=0
+    for vfile in ${vpairs}
+    do
+        echo "Sorting ${vfile} ..."
+        fcounts=$((fcounts+1))
+        ofile=$(echo ${vfile} | sed -e 's/validPairs/sorted.validPairs/')
+        #sort -k2,2V -k3,3n -k5,5V -k6,6n -T ./tmp/ -o ${ofile} ${vfile}
+        sort -k2,2 -k5,5 -k3,3n -k6,6n -T ./tmp/ -o ${ofile} ${vfile}
+    done
+
+    if [[ $fcounts -gt 1 ]]
+    then
+        echo "Merging and removing the duplicates ..."
+        ## Sort valid pairs and remove read pairs with same starts (i.e duplicated read pairs)
+        #sort -k2,2V -k3,3n -k5,5V -k6,6n -T ./tmp/ -m ${vpairs_sorted} | \
+        sort -k2,2 -k5,5 -k3,3n -k6,6n -T ./tmp/ -m ${vpairs_sorted} | \
+            awk -F"\t" 'BEGIN{c1=0;c2=0;s1=0;s2=0}(c1!=$2 || c2!=$5 || s1!=$3 || s2!=$6){print;c1=$2;c2=$5;s1=$3;s2=$6}' > ${prefix}.allValidPairs
+    else
+        echo "Removing the duplicates ..."
+        cat ${vpairs_sorted} | awk -F"\t" 'BEGIN{c1=0;c2=0;s1=0;s2=0}(c1!=$2 || c2!=$5 || s1!=$3 || s2!=$6){print;c1=$2;c2=$5;s1=$3;s2=$6}' > ${prefix}.allValidPairs
+    fi
+
+    ## clean
+    /bin/rm -rf ${vpairs_sorted}
+else
+    cat ${vpairs} > ${prefix}.allValidPairs
+fi
+
+echo -e -n "valid_interaction\t" > ${prefix}_allValidPairs.mergestat
+cat ${vpairs} | wc -l >> ${prefix}_allValidPairs.mergestat
+echo -e -n "valid_interaction_rmdup\t" >> ${prefix}_allValidPairs.mergestat
+cat ${prefix}.allValidPairs | wc -l >> ${prefix}_allValidPairs.mergestat
+
+## Count short range (<20000) vs long range contacts
+awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} $2 == $5{cis=cis+1; d=$6>$3?$6-$3:$3-$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} $2!=$5{trans=trans+1}END{print "trans_interaction\t"trans"\ncis_interaction\t"cis"\ncis_shortRange\t"sr"\ncis_longRange\t"lr}' ${prefix}.allValidPairs >> ${prefix}_allValidPairs.mergestat
+
+## clean
+/bin/rm -rf ./tmp/
diff --git a/bin/mapped_2hic_dnase.py b/bin/mapped_2hic_dnase.py
new file mode 100755
index 0000000000000000000000000000000000000000..c417907a4483bd33bc6a6e0ceeb752d5c3d32121
--- /dev/null
+++ b/bin/mapped_2hic_dnase.py
@@ -0,0 +1,502 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nicolas Servant, Eric Viara
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to keep only valid pairs when no restriction enzyme are used (i.e. DNAse or Micro-HiC)
+"""
+
+import getopt
+import sys
+import os
+import re
+import pysam
+
+
+def usage():
+    """Print the command-line help for mapped_2hic_dnase.py."""
+    help_lines = [
+        "Usage : python mapped_2hic_dnase.py",
+        "-r/--mappedReadsFile <BAM/SAM file of mapped reads>",
+        "[-o/--outputDir] <Output directory. Default is current directory>",
+        "[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>",
+        "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>",
+        "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>",
+        "[-v/--verbose] <Verbose>",
+        "[-h/--help] <Help>",
+    ]
+    for line in help_lines:
+        print(line)
+    return
+
+
+def get_args():
+    """Parse command-line options.
+
+    Returns the getopt list of (option, value) pairs; prints usage and
+    exits with status -1 on a malformed command line.
+    """
+    try:
+        # Bug fix: long option names must mirror the names tested in the
+        # __main__ loop ("--minCisDist", "--gtag"), and value-taking options
+        # need a trailing "=". The previous list had "minDist=" and "gatg",
+        # so neither --minCisDist nor --gtag was accepted.
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "r:o:d:g:avh",
+            ["mappedReadsFile=", "outputDir=", "minCisDist=", "gtag=", "all", "verbose", "help"],
+        )
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    return opts
+
+
+def get_read_strand(read):
+    """
+    Conversion of read orientation to naive strand representation
+
+    Parameters
+    ----------
+    read : AlignedSegment
+        an aligned read
+    """
+    return "-" if read.is_reverse else "+"
+
+
+def get_read_pos(read, st="start"):
+    """
+    Return the read position (zero-based) used for the intersection with
+    the restriction fragment
+
+    The 5' end is not a good choice for the reverse reads (which contain part
+    of the restriction site, and thus overlap the next restriction fragment)
+    Using the left-most position (5' for forward, 3' for reverse) or the
+    middle of the read should work but the middle of the reads might be more
+    safe
+
+    Parameters
+    -----------
+    read : list
+        list of aligned reads
+    st : str
+        one of "start", "middle" or "left"
+    """
+    if st == "middle":
+        pos = read.reference_start + int(read.alen / 2)
+    elif st == "start":
+        pos = get_read_start(read)
+    elif st == "left":
+        pos = read.reference_start
+    else:
+        # Bug fix: an unknown 'st' previously fell through and raised an
+        # obscure UnboundLocalError on 'pos'; fail with a clear message
+        raise ValueError("Unknown read position type '{}' (expected 'start', 'middle' or 'left')".format(st))
+
+    return pos
+
+
+def get_read_start(read):
+    """
+    Return the 5' end of the read
+    """
+    if not read.is_reverse:
+        return read.reference_start
+    # Reverse reads: the 5' end is the right-most aligned base
+    return read.reference_start + read.alen - 1
+
+
+def get_ordered_reads(read1, read2):
+    """
+    Reorient reads
+
+    The sequencing is usually not oriented. Reorient the reads so that r1 is
+    always before r2 (by position on the same chromosome, otherwise by
+    chromosome id)
+
+    read1 = [AlignedRead]
+    read2 = [AlignedRead]
+    """
+    if read1.reference_id == read2.reference_id:
+        swap = get_read_pos(read2) < get_read_pos(read1)
+    else:
+        swap = read2.reference_id < read1.reference_id
+    return (read2, read1) if swap else (read1, read2)
+
+
+def isIntraChrom(read1, read2):
+    """
+    Return True if the reads pair is intrachromosomal
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    return read1.reference_id == read2.reference_id
+
+
+def get_valid_orientation(read1, read2):
+    """
+    Both reads are expected to be on the different restriction fragments
+
+    Check the orientation of reads ->-> / <-<- / -><- / <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+
+    # Map the ordered (r1, r2) strand combination to its orientation code
+    orientations = {
+        ("+", "+"): "FF",
+        ("-", "-"): "RR",
+        ("+", "-"): "FR",
+        ("-", "+"): "RF",
+    }
+    return orientations.get((get_read_strand(r1), get_read_strand(r2)))
+
+
+def get_cis_dist(read1, read2):
+    """
+    Calculate the contact distance between two intrachromosomal reads
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    Returns the absolute distance in bp, or None for unmapped or
+    interchromosomal pairs.
+    """
+    dist = None
+    # Bug fix: this check referenced the undefined names r1/r2 (a NameError,
+    # or silently wrong values picked up from __main__ globals)
+    if not read1.is_unmapped and not read2.is_unmapped:
+        ## Contact distances can be calculated for intrachromosomal reads only
+        if isIntraChrom(read1, read2):
+            r1pos = get_read_pos(read1)
+            r2pos = get_read_pos(read2)
+            dist = abs(r1pos - r2pos)
+    return dist
+
+
+def get_read_tag(read, tag):
+    # Return the value of 'tag' on this read, or None when the tag is absent
+    return next((value for key, value in read.get_tags() if key == tag), None)
+
+
+if __name__ == "__main__":
+    # Driver: read a SAM/BAM of mapped read pairs, classify each pair
+    # (valid / single-end / filtered / dumped), write per-class pair files
+    # and a .RSstat summary.
+    # Read command line arguments
+    opts = get_args()
+    verbose = False
+    allOutput = False
+    minInsertSize = None
+    maxInsertSize = None
+    minDist = None
+    outputDir = "."
+    gtag = None
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-r", "--mappedReadsFile"):
+            mappedReadsFile = arg
+        elif opt in ("-o", "--outputDir"):
+            outputDir = arg
+        elif opt in ("-d", "--minCisDist"):
+            minDist = arg
+        elif opt in ("-g", "--gtag"):
+            gtag = arg
+        elif opt in ("-a", "--all"):
+            allOutput = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
+
+    # Verbose mode
+    if verbose:
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## minCisDist=", minDist)
+        print("## allOuput=", allOutput)
+        print("## verbose={}\n".format(verbose))
+
+    # Initialize variables
+    reads_counter = 0
+    valid_counter = 0
+    valid_counter_FF = 0
+    valid_counter_RR = 0
+    valid_counter_FR = 0
+    valid_counter_RF = 0
+    single_counter = 0
+    dump_counter = 0
+    filt_counter = 0
+
+    # AS counter
+    G1G1_ascounter = 0
+    G2G2_ascounter = 0
+    G1U_ascounter = 0
+    UG1_ascounter = 0
+    G2U_ascounter = 0
+    UG2_ascounter = 0
+    G1G2_ascounter = 0
+    G2G1_ascounter = 0
+    UU_ascounter = 0
+    CF_ascounter = 0
+
+    # Output files are named after the input, minus its .bam/.sam extension
+    baseReadsFile = os.path.basename(mappedReadsFile)
+    baseReadsFile = re.sub(r"\.bam$|\.sam$", "", baseReadsFile)
+
+    # Open handlers for output files
+    handle_valid = open(outputDir + "/" + baseReadsFile + ".validPairs", "w")
+
+    if allOutput:
+        handle_dump = open(outputDir + "/" + baseReadsFile + ".DumpPairs", "w")
+        handle_single = open(outputDir + "/" + baseReadsFile + ".SinglePairs", "w")
+        handle_filt = open(outputDir + "/" + baseReadsFile + ".FiltPairs", "w")
+
+    # Read the SAM/BAM file
+    if verbose:
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
+    samfile = pysam.Samfile(mappedReadsFile, "rb")
+
+    # Reads are 0-based too (for both SAM and BAM format)
+    # NOTE(review): the pairing logic assumes both mates of a pair appear
+    # consecutively (read1 before its read2), i.e. a name-collated file —
+    # confirm upstream sorting.
+    # Loop on all reads
+    for read in samfile.fetch(until_eof=True):
+        reads_counter += 1
+        cur_handler = None
+        interactionType = None
+        htag = ""
+
+        # First mate
+        if read.is_read1:
+            r1 = read
+            if not r1.is_unmapped:
+                r1_chrom = samfile.get_reference_name(r1.reference_id)
+            else:
+                r1_chrom = None
+
+        # Second mate
+        elif read.is_read2:
+            r2 = read
+            if not r2.is_unmapped:
+                r2_chrom = samfile.get_reference_name(r2.reference_id)
+            else:
+                r2_chrom = None
+
+            # r1/r1_chrom were stored when the first mate was processed
+            if isIntraChrom(r1, r2):
+                dist = get_cis_dist(r1, r2)
+            else:
+                dist = None
+
+            # Check singleton
+            if r1.is_unmapped or r2.is_unmapped:
+                interactionType = "SI"
+                single_counter += 1
+                cur_handler = handle_single if allOutput else None
+
+            # Check Distance criteria - Filter
+            if minDist is not None and dist is not None and dist < int(minDist):
+                interactionType = "FILT"
+                filt_counter += 1
+                cur_handler = handle_filt if allOutput else None
+
+            # By default pair is valid
+            if interactionType == None:
+                interactionType = "VI"
+                valid_counter += 1
+                cur_handler = handle_valid
+                validType = get_valid_orientation(r1, r2)
+                if validType == "RR":
+                    valid_counter_RR += 1
+                elif validType == "FF":
+                    valid_counter_FF += 1
+                elif validType == "FR":
+                    valid_counter_FR += 1
+                elif validType == "RF":
+                    valid_counter_RF += 1
+                else:
+                    # NOTE(review): get_valid_orientation returns one of
+                    # FF/RR/FR/RF for mapped pairs, so this dump branch looks
+                    # unreachable here — confirm.
+                    interactionType = "DUMP"
+                    dump_counter += 1
+                    cur_handler = handle_dump if allOutput else None
+
+            # Split valid pairs based on XA tag
+            # NOTE(review): the allele-specific counters are updated for every
+            # pair (including SI/FILT ones), not only valid pairs — confirm
+            # this is intended.
+            if gtag is not None:
+                r1as = get_read_tag(r1, gtag)
+                r2as = get_read_tag(r2, gtag)
+
+                if r1as == 1 and r2as == 1:
+                    G1G1_ascounter += 1
+                elif r1as == 2 and r2as == 2:
+                    G2G2_ascounter += 1
+                elif r1as == 1 and r2as == 0:
+                    G1U_ascounter += 1
+                elif r1as == 0 and r2as == 1:
+                    UG1_ascounter += 1
+                elif r1as == 2 and r2as == 0:
+                    G2U_ascounter += 1
+                elif r1as == 0 and r2as == 2:
+                    UG2_ascounter += 1
+                elif r1as == 1 and r2as == 2:
+                    G1G2_ascounter += 1
+                elif r1as == 2 and r2as == 1:
+                    G2G1_ascounter += 1
+                elif r1as == 3 or r2as == 3:
+                    CF_ascounter += 1
+                else:
+                    UU_ascounter += 1
+
+            if cur_handler is not None:
+                if not r1.is_unmapped and not r2.is_unmapped:
+                    ##reorient reads to ease duplicates removal
+                    or1, or2 = get_ordered_reads(r1, r2)
+                    or1_chrom = samfile.get_reference_name(or1.reference_id)
+                    or2_chrom = samfile.get_reference_name(or2.reference_id)
+
+                    ##reset as tag now that the reads are oriented
+                    r1as = get_read_tag(or1, gtag)
+                    r2as = get_read_tag(or2, gtag)
+                    if gtag is not None:
+                        htag = str(r1as) + "-" + str(r2as)
+
+                    cur_handler.write(
+                        or1.query_name
+                        + "\t"
+                        + or1_chrom
+                        + "\t"
+                        + str(get_read_pos(or1) + 1)
+                        + "\t"
+                        + str(get_read_strand(or1))
+                        + "\t"
+                        + or2_chrom
+                        + "\t"
+                        + str(get_read_pos(or2) + 1)
+                        + "\t"
+                        + str(get_read_strand(or2))
+                        + "\t"
+                        + "NA"  ##dist - no fragments in DNase mode
+                        + "\t"
+                        + "NA"  ##resfrag1
+                        + "\t"
+                        + "NA"  ##resfrag2
+                        + "\t"
+                        + str(or1.mapping_quality)  ##mapq1
+                        + "\t"
+                        + str(or2.mapping_quality)  ##mapq2
+                        + "\t"
+                        + str(htag)
+                        + "\n"
+                    )
+
+                elif r2.is_unmapped and not r1.is_unmapped:
+                    cur_handler.write(
+                        r1.query_name
+                        + "\t"
+                        + r1_chrom
+                        + "\t"
+                        + str(get_read_pos(r1) + 1)
+                        + "\t"
+                        + str(get_read_strand(r1))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r1.mapping_quality)
+                        + "\t"
+                        + "*"
+                        + "\n"
+                    )
+                elif r1.is_unmapped and not r2.is_unmapped:
+                    cur_handler.write(
+                        r2.query_name
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_chrom
+                        + "\t"
+                        + str(get_read_pos(r2) + 1)
+                        + "\t"
+                        + str(get_read_strand(r2))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r2.mapping_quality)
+                        + "\n"
+                    )
+
+            # Progress report every 100k reads
+            if reads_counter % 100000 == 0 and verbose:
+                print("##", reads_counter)
+
+    # Close handler
+    handle_valid.close()
+    if allOutput:
+        handle_dump.close()
+        handle_single.close()
+        handle_filt.close()
+
+    # Write stats file
+    with open(outputDir + "/" + baseReadsFile + ".RSstat", "w") as handle_stat:
+        handle_stat.write("## Hi-C processing - no restriction fragments\n")
+        handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+        handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
+        handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
+        handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
+
+        ## Write AS report
+        if gtag is not None:
+            handle_stat.write("## ======================================\n")
+            handle_stat.write("## Allele specific information\n")
+            handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
+            handle_stat.write(
+                "Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t"
+                + str(UG1_ascounter + G1U_ascounter)
+                + "\n"
+            )
+            handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
+            handle_stat.write(
+                "Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t"
+                + str(UG2_ascounter + G2U_ascounter)
+                + "\n"
+            )
+            handle_stat.write(
+                "Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n"
+            )
+            handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
diff --git a/bin/mapped_2hic_fragments.py b/bin/mapped_2hic_fragments.py
new file mode 100755
index 0000000000000000000000000000000000000000..a1298716ce83c4e99931534357c03634fd5d8301
--- /dev/null
+++ b/bin/mapped_2hic_fragments.py
@@ -0,0 +1,893 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nicolas Servant, Eric Viara
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to keep only valid 3C products - DE and SC are removed
+Output is : readname / 
+"""
+import time
+import getopt
+import sys
+import os
+import re
+import pysam
+from bx.intervals.intersection import Intersecter, Interval
+
+
+def usage():
+    """Print the command-line help for mapped_2hic_fragments.py."""
+    help_lines = [
+        "Usage : python mapped_2hic_fragments.py",
+        "-f/--fragmentFile <Restriction fragment file GFF3>",
+        "-r/--mappedReadsFile <BAM/SAM file of mapped reads>",
+        "[-o/--outputDir] <Output directory. Default is current directory>",
+        "[-s/--shortestInsertSize] <Shortest insert size of mapped reads to consider>",
+        "[-l/--longestInsertSize] <Longest insert size of mapped reads to consider>",
+        "[-t/--shortestFragmentLength] <Shortest restriction fragment length to consider>",
+        "[-m/--longestFragmentLength] <Longest restriction fragment length to consider>",
+        "[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>",
+        "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>",
+        "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>",
+        "[-S/--sam] <Output an additional SAM file with flag 'CT' for pairs classification>",
+        "[-v/--verbose] <Verbose>",
+        "[-h/--help] <Help>",
+    ]
+    for line in help_lines:
+        print(line)
+    return
+
+
+def get_args():
+    """Parse command-line options.
+
+    Returns the getopt list of (option, value) pairs; prints usage and
+    exits with status -1 on a malformed command line.
+    """
+    try:
+        # Bug fix: long option names must mirror the names tested in the
+        # __main__ loop and advertised by usage(); value-taking options need
+        # a trailing "=". The previous list used different names
+        # ("minInsertSize", "gatg", ...) and omitted "=" on most of them, so
+        # the documented long options were rejected or silently ignored.
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "f:r:o:s:l:t:m:d:g:Svah",
+            [
+                "fragmentFile=",
+                "mappedReadsFile=",
+                "outputDir=",
+                "shortestInsertSize=",
+                "longestInsertSize=",
+                "shortestFragmentLength=",
+                "longestFragmentLength=",
+                "minCisDist=",
+                "gtag=",
+                "sam",
+                "verbose",
+                "all",
+                "help",
+            ],
+        )
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    return opts
+
+
+def timing(function, *args):
+    """
+    Run a function and return its result, printing the elapsed wall-clock
+    time in milliseconds.
+    If the function requires arguments, those can be passed in
+    """
+    startTime = time.time()
+    result = function(*args)
+    print("{} function took {:.3f}ms".format(function.__name__, (time.time() - startTime) * 1000))
+    return result
+
+
+def get_read_strand(read):
+    """
+    Conversion of read orientation to naive strand representation
+
+    Parameters
+    ----------
+    read : AlignedSegment
+        an aligned read
+    """
+    return "-" if read.is_reverse else "+"
+
+
+def isIntraChrom(read1, read2):
+    """
+    Return True if the reads pair is intrachromosomal
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    return read1.tid == read2.tid
+
+
+def get_cis_dist(read1, read2):
+    """
+    Calculte the contact distance between two intrachromosomal reads
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    # Distances are only defined for mapped, intrachromosomal pairs
+    if read1.is_unmapped or read2.is_unmapped:
+        return None
+    if not isIntraChrom(read1, read2):
+        return None
+    return abs(get_read_pos(read1) - get_read_pos(read2))
+
+
+def get_read_pos(read, st="start"):
+    """
+    Return the read position (zero-based) used for the intersection with
+    the restriction fragment
+
+    The 5' end is not a good choice for the reverse reads (which contain part
+    of the restriction site, and thus overlap the next restriction fragment)
+    Using the left-most position (ie. start, 5' for forward, 3' for reverse) or the
+    middle of the read should work but the middle of the reads might be more
+    safe
+
+    Parameters
+    -----------
+    read : list
+        list of aligned reads
+    st : str
+        one of "start", "middle" or "left"
+    """
+
+    if st == "middle":
+        pos = read.reference_start + int(read.alen / 2)
+    elif st == "start":
+        pos = get_read_start(read)
+    elif st == "left":
+        pos = read.reference_start
+    else:
+        # Bug fix: an unknown 'st' previously fell through and raised an
+        # obscure UnboundLocalError on 'pos'; fail with a clear message
+        raise ValueError("Unknown read position type '{}' (expected 'start', 'middle' or 'left')".format(st))
+
+    return pos
+
+
+def get_read_start(read):
+    """
+    Return the 5' end of the read
+    """
+    if not read.is_reverse:
+        return read.reference_start
+    # Reverse reads: the 5' end is the right-most aligned base
+    return read.reference_start + read.alen - 1
+
+
+def get_ordered_reads(read1, read2):
+    """
+    Reorient reads
+
+    Sequencing is unstranded (always performed from 5' to 3' end), so the
+    two mates of a pair can arrive in either genomic order:
+
+    1              2
+    --->           --->
+    ==========  or =========
+         <----          <---
+             2             1
+
+    Return the pair reordered so the first read always comes before the
+    second (by position on the same chromosome, otherwise by chromosome id).
+    read1 = [AlignedRead]
+    read2 = [AlignedRead]
+    """
+    if read1.tid == read2.tid:
+        swap = get_read_pos(read2) < get_read_pos(read1)
+    else:
+        swap = read2.tid < read1.tid
+    return (read2, read1) if swap else (read1, read2)
+
+
+def load_restriction_fragment(in_file, minfragsize=None, maxfragsize=None, verbose=False):
+    """
+    Read a BED file and store the intervals in a tree
+
+    Intervals are zero-based objects. The output object is a hash table with
+    one search tree per chromosome
+
+    in_file = input BED file path [character]
+    minfragsize = fragments shorter than this are flagged as filtered [int or None]
+    maxfragsize = fragments longer than this are flagged as filtered [int or None]
+    verbose = verbose mode [logical]
+    """
+    resFrag = {}
+    if verbose:
+        print("## Loading Restriction File Intervals {} ...".format(in_file))
+    nline = 0
+    nfilt = 0
+    # 'with' guarantees the handle is closed even if parsing raises
+    with open(in_file) as bed_handle:
+        for line in bed_handle:
+            nline += 1
+            bedtab = line.split("\t")
+            try:
+                chromosome, start, end, name = bedtab[:4]
+            except ValueError:
+                print("Warning : wrong input format in line {}. Not a BED file ?!".format(nline))
+                continue
+
+            # BED files are zero-based as Intervals objects
+            start = int(start)  # + 1
+            end = int(end)
+            fragl = abs(end - start)
+            name = name.strip()
+
+            ## Flag fragments outside the requested size range
+            filt = False
+            if minfragsize is not None and int(fragl) < int(minfragsize):
+                nfilt += 1
+                filt = True
+            elif maxfragsize is not None and int(fragl) > int(maxfragsize):
+                nfilt += 1
+                filt = True
+
+            # One interval tree per chromosome (deduplicates the old
+            # copy-pasted add_interval branches)
+            tree = resFrag.setdefault(chromosome, Intersecter())
+            tree.add_interval(Interval(start, end, value={"name": name, "filter": filt}))
+
+    if nfilt > 0:
+        print("Warning : {} fragment(s) outside of range and discarded. {} remaining.".format(nfilt, nline - nfilt))
+    return resFrag
+
+
+def get_overlapping_restriction_fragment(resFrag, chrom, read):
+    """
+    Intersect a given read with the set of restriction fragments
+
+    ##
+    resFrag = the restriction fragments [hash]
+    chrom = the chromosome to look at [character]
+    read = the read to intersect [AlignedRead]
+
+    Returns the single overlapping fragment [Interval], or None when the
+    read overlaps zero or several fragments.
+    """
+    # Get read position (middle or start)
+    pos = get_read_pos(read, st="middle")
+
+    if chrom not in resFrag:
+        # Consistency fix: use read.query_name instead of the deprecated
+        # read.qname used elsewhere in this function
+        print("Warning - no restriction fragments for {} at {} : {}".format(read.query_name, chrom, pos))
+        return None
+
+    # Overlap with the position of the read (zero-based)
+    resfrag = resFrag[chrom].find(pos, pos + 1)
+    if len(resfrag) > 1:
+        # Typo fixed in the warning message ("restictions")
+        print("Warning : {} restriction fragments found for {} -skipped".format(len(resfrag), read.query_name))
+        return None
+    if len(resfrag) == 0:
+        print("Warning - no restriction fragments for {} at {} : {}".format(read.query_name, chrom, pos))
+        return None
+    return resfrag[0]
+
+
+def are_contiguous_fragments(frag1, frag2, chr1, chr2):
+    """
+    Compare fragment positions to check if they are contiguous
+    """
+    if chr1 != chr2:
+        return False
+    # Gap between the end of the upstream fragment and the start of the
+    # downstream one; zero means the fragments touch
+    if int(frag1.start) < int(frag2.start):
+        gap = int(frag2.start) - int(frag1.end)
+    else:
+        gap = int(frag1.start) - int(frag2.end)
+    return gap == 0
+
+
+def is_religation(read1, read2, frag1, frag2):
+    """
+    Reads are expected to map adjacent fragments
+    Check the orientation of reads -><-
+
+    NOTE(review): the strand check below is commented out, so any pair on
+    contiguous fragments is currently classified as a religation regardless
+    of read orientation — confirm this is intentional.
+    """
+    ret = False
+    if are_contiguous_fragments(frag1, frag2, read1.tid, read2.tid):
+        # r1, r2 = get_ordered_reads(read1, read2)
+        # if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        ret = True
+    return ret
+
+
+def is_self_circle(read1, read2):
+    """
+    Both reads are expected to be on the same restriction fragments
+    Check the orientation of reads <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    """
+    # After ordering, a self-circle reads outwards: 1<- ->2 or 2<- ->1
+    r1, r2 = get_ordered_reads(read1, read2)
+    return get_read_strand(r1) == "-" and get_read_strand(r2) == "+"
+
+
+def is_dangling_end(read1, read2):
+    """
+    Both reads are expected to be on the same restriction fragments
+    Check the orientation of reads -><-
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    """
+    # After ordering, a dangling end reads inwards: 1-> <-2 or 2-> <-1
+    r1, r2 = get_ordered_reads(read1, read2)
+    return get_read_strand(r1) == "+" and get_read_strand(r2) == "-"
+
+
+def get_valid_orientation(read1, read2):
+    """
+    Both reads are expected to be on the different restriction fragments
+    Check the orientation of reads ->-> / <-<- / -><- / <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+
+    # Map the ordered (r1, r2) strand combination to its orientation code
+    orientations = {
+        ("+", "+"): "FF",
+        ("-", "-"): "RR",
+        ("+", "-"): "FR",
+        ("-", "+"): "RF",
+    }
+    return orientations.get((get_read_strand(r1), get_read_strand(r2)))
+
+
+def get_PE_fragment_size(read1, read2, resFrag1, resFrag2, interactionType):
+    """
+    Calculte the size of the DNA fragment library
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    resFrag1 = restriction fragment overlapping the R1 read [interval]
+    resFrag2 = restriction fragment overlapping the R2 read [interval]
+    interactionType : Type of interaction from get_interaction_type() [str]
+
+    Returns the estimated fragment size in bp, or None for unmapped mates
+    or interaction types other than DE/RE/SC/VI.
+    """
+
+    fragmentsize = None
+
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+    if not r1.is_unmapped and not r2.is_unmapped:
+        # Keep each restriction fragment paired with its (possibly swapped) read
+        if r1 == read2:
+            rfrag1 = resFrag2
+            rfrag2 = resFrag1
+        else:
+            rfrag1 = resFrag1
+            rfrag2 = resFrag2
+
+        ## In this case use the read start !
+        r1pos = get_read_start(r1)
+        r2pos = get_read_start(r2)
+
+        if interactionType == "DE" or interactionType == "RE":
+            # Dangling end / religation: plain distance between 5' ends
+            fragmentsize = r2pos - r1pos
+        elif interactionType == "SC":
+            # Self-circle: distances to the outer fragment boundaries
+            fragmentsize = (r1pos - rfrag1.start) + (rfrag2.end - r2pos)
+        elif interactionType == "VI":
+            # Valid pair: distance from each read to the fragment boundary it faces
+            if get_read_strand(r1) == "+":
+                dr1 = rfrag1.end - r1pos
+            else:
+                dr1 = r1pos - rfrag1.start
+            if get_read_strand(r2) == "+":
+                dr2 = rfrag2.end - r2pos
+            else:
+                dr2 = r2pos - rfrag2.start
+            fragmentsize = dr2 + dr1
+
+    return fragmentsize
+
+
+def get_interaction_type(read1, read1_chrom, resfrag1, read2, read2_chrom, resfrag2, verbose):
+    """
+    Returns the interaction type
+
+    For a given reads pair and their related restriction fragment, classify
+    the 3C products as :
+
+    - Interaction ("VI")
+    - Self circle ("SC")
+    - Dangling end ("DE")
+    - Religation ("RE")
+    - Single-end ("SI")
+    - None -> same restriction fragment and same strand = Dump
+
+    ##
+    read1 = the R1 read of the pair [AlignedRead]
+    read1_chrom = the chromosome of R1 read [character]
+    resfrag1 = restriction fragment overlapping the R1 read [interval]
+    read2 = the R2 read of the pair [AlignedRead]
+    read2_chrom = the chromosome of R2 read [character]
+    resfrag2 = restriction fragment overlapping the R2 read [interval]
+    verbose = verbose mode [logical]
+
+    """
+    # If returned InteractionType=None -> Same restriction fragment
+    # and same strand = Dump
+    interactionType = None
+
+    if not read1.is_unmapped and not read2.is_unmapped and resfrag1 is not None and resfrag2 is not None:
+        # same restriction fragment
+        if resfrag1 == resfrag2:
+            # Self_circle <- ->
+            if is_self_circle(read1, read2):
+                interactionType = "SC"
+            # Dangling_end -> <-
+            elif is_dangling_end(read1, read2):
+                interactionType = "DE"
+        elif is_religation(read1, read2, resfrag1, resfrag2):
+            interactionType = "RE"
+        else:
+            interactionType = "VI"
+    # Bug fix: this branch referenced the undefined names r1/r2, which only
+    # worked by accident through same-named globals in the __main__ block
+    elif read1.is_unmapped or read2.is_unmapped:
+        interactionType = "SI"
+
+    return interactionType
+
+
+def get_read_tag(read, tag):
+    # Return the value of 'tag' on this read, or None when the tag is absent
+    return next((value for key, value in read.get_tags() if key == tag), None)
+
+
+if __name__ == "__main__":
+    # Read command line arguments
+    opts = get_args()
+    samOut = False
+    verbose = False
+    allOutput = False
+    minInsertSize = None
+    maxInsertSize = None
+    minFragSize = None
+    maxFragSize = None
+    minDist = None
+    outputDir = "."
+    gtag = None
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-f", "--fragmentFile"):
+            fragmentFile = arg
+        elif opt in ("-r", "--mappedReadsFile"):
+            mappedReadsFile = arg
+        elif opt in ("-o", "--outputDir"):
+            outputDir = arg
+        elif opt in ("-s", "--shortestInsertSize"):
+            minInsertSize = arg
+        elif opt in ("-l", "--longestInsertSize"):
+            maxInsertSize = arg
+        elif opt in ("-t", "--shortestFragmentLength"):
+            minFragSize = arg
+        elif opt in ("-m", "--longestFragmentLength"):
+            maxFragSize = arg
+        elif opt in ("-d", "--minCisDist"):
+            minDist = arg
+        elif opt in ("-g", "--gtag"):
+            gtag = arg
+        elif opt in ("-a", "--all"):
+            allOutput = True
+        elif opt in ("-S", "--sam"):
+            samOut = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
+
+    # Verbose mode
+    if verbose:
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## fragmentFile=", fragmentFile)
+        print("## minInsertSize=", minInsertSize)
+        print("## maxInsertSize=", maxInsertSize)
+        print("## minFragSize=", minFragSize)
+        print("## maxFragSize=", maxFragSize)
+        print("## allOuput=", allOutput)
+        print("## SAM ouput=", samOut)
+        print("## verbose={}\n".format(verbose))
+
+    # Initialize variables
+    reads_counter = 0
+    de_counter = 0
+    re_counter = 0
+    sc_counter = 0
+    valid_counter = 0
+    valid_counter_FF = 0
+    valid_counter_RR = 0
+    valid_counter_FR = 0
+    valid_counter_RF = 0
+    single_counter = 0
+    dump_counter = 0
+    filt_counter = 0
+
+    ## AS counter
+    G1G1_ascounter = 0
+    G2G2_ascounter = 0
+    G1U_ascounter = 0
+    UG1_ascounter = 0
+    G2U_ascounter = 0
+    UG2_ascounter = 0
+    G1G2_ascounter = 0
+    G2G1_ascounter = 0
+    UU_ascounter = 0
+    CF_ascounter = 0
+
+    baseReadsFile = os.path.basename(mappedReadsFile)
+    baseReadsFile = re.sub(r"\.bam$|\.sam$", "", baseReadsFile)
+
+    # Open handlers for output files
+    handle_valid = open(outputDir + "/" + baseReadsFile + ".validPairs", "w")
+
+    if allOutput:
+        handle_de = open(outputDir + "/" + baseReadsFile + ".DEPairs", "w")
+        handle_re = open(outputDir + "/" + baseReadsFile + ".REPairs", "w")
+        handle_sc = open(outputDir + "/" + baseReadsFile + ".SCPairs", "w")
+        handle_dump = open(outputDir + "/" + baseReadsFile + ".DumpPairs", "w")
+        handle_single = open(outputDir + "/" + baseReadsFile + ".SinglePairs", "w")
+        handle_filt = open(outputDir + "/" + baseReadsFile + ".FiltPairs", "w")
+
+    # Read the BED file
+    resFrag = timing(load_restriction_fragment, fragmentFile, minFragSize, maxFragSize, verbose)
+
+    # Read the SAM/BAM file
+    if verbose:
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
+    samfile = pysam.Samfile(mappedReadsFile, "rb")
+
+    if samOut:
+        handle_sam = pysam.AlignmentFile(outputDir + "/" + baseReadsFile + "_interaction.bam", "wb", template=samfile)
+
+    # Reads are 0-based too (for both SAM and BAM format)
+    # Loop on all reads
+    if verbose:
+        print("## Classifying Interactions ...")
+
+    for read in samfile.fetch(until_eof=True):
+        reads_counter += 1
+        cur_handler = None
+        htag = ""
+
+        # First mate
+        if read.is_read1:
+            r1 = read
+            if not r1.is_unmapped:
+                r1_chrom = samfile.get_reference_name(r1.tid)
+                r1_resfrag = get_overlapping_restriction_fragment(resFrag, r1_chrom, r1)
+            else:
+                r1_resfrag = None
+                r1_chrom = None
+
+        # Second mate
+        elif read.is_read2:
+            r2 = read
+            if not r2.is_unmapped:
+                r2_chrom = samfile.get_reference_name(r2.tid)
+                r2_resfrag = get_overlapping_restriction_fragment(resFrag, r2_chrom, r2)
+            else:
+                r2_resfrag = None
+                r2_chrom = None
+
+            if r1_resfrag is not None or r2_resfrag is not None:
+                interactionType = get_interaction_type(r1, r1_chrom, r1_resfrag, r2, r2_chrom, r2_resfrag, verbose)
+                dist = get_PE_fragment_size(r1, r2, r1_resfrag, r2_resfrag, interactionType)
+                cdist = get_cis_dist(r1, r2)
+
+                ## Filter based on restriction fragments
+                if (r1_resfrag is not None and r1_resfrag.value["filter"] == True) or (
+                    r2_resfrag is not None and r2_resfrag.value["filter"]
+                ) == True:
+                    interactionType = "FILT"
+
+                # Check Insert size criteria - FILT
+                if (minInsertSize is not None and dist is not None and dist < int(minInsertSize)) or (
+                    maxInsertSize is not None and dist is not None and dist > int(maxInsertSize)
+                ):
+                    interactionType = "FILT"
+
+                # Check Distance criteria - FILT
+                # Done for VI otherwise this criteria will overwrite all other invalid classification
+                if interactionType == "VI" and minDist is not None and cdist is not None and cdist < int(minDist):
+                    interactionType = "FILT"
+
+                if interactionType == "VI":
+                    valid_counter += 1
+                    cur_handler = handle_valid
+                    validType = get_valid_orientation(r1, r2)
+                    if validType == "RR":
+                        valid_counter_RR += 1
+                    elif validType == "FF":
+                        valid_counter_FF += 1
+                    elif validType == "FR":
+                        valid_counter_FR += 1
+                    elif validType == "RF":
+                        valid_counter_RF += 1
+
+                    ## Counts valid pairs based on XA tag
+                    if gtag is not None:
+                        r1as = get_read_tag(r1, gtag)
+                        r2as = get_read_tag(r2, gtag)
+                        if r1as == 1 and r2as == 1:
+                            G1G1_ascounter += 1
+                        elif r1as == 2 and r2as == 2:
+                            G2G2_ascounter += 1
+                        elif r1as == 1 and r2as == 0:
+                            G1U_ascounter += 1
+                        elif r1as == 0 and r2as == 1:
+                            UG1_ascounter += 1
+                        elif r1as == 2 and r2as == 0:
+                            G2U_ascounter += 1
+                        elif r1as == 0 and r2as == 2:
+                            UG2_ascounter += 1
+                        elif r1as == 1 and r2as == 2:
+                            G1G2_ascounter += 1
+                        elif r1as == 2 and r2as == 1:
+                            G2G1_ascounter += 1
+                        elif r1as == 3 or r2as == 3:
+                            CF_ascounter += 1
+                        else:
+                            UU_ascounter += 1
+
+                elif interactionType == "DE":
+                    de_counter += 1
+                    cur_handler = handle_de if allOutput else None
+
+                elif interactionType == "RE":
+                    re_counter += 1
+                    cur_handler = handle_re if allOutput else None
+
+                elif interactionType == "SC":
+                    sc_counter += 1
+                    cur_handler = handle_sc if allOutput else None
+
+                elif interactionType == "SI":
+                    single_counter += 1
+                    cur_handler = handle_single if allOutput else None
+
+                elif interactionType == "FILT":
+                    filt_counter += 1
+                    cur_handler = handle_filt if allOutput else None
+
+                else:
+                    interactionType = "DUMP"
+                    dump_counter += 1
+                    cur_handler = handle_dump if allOutput else None
+            else:
+                interactionType = "DUMP"
+                dump_counter += 1
+                cur_handler = handle_dump if allOutput else None
+                dist = None
+
+            ## Write results in right handler
+            if cur_handler is not None:
+                if not r1.is_unmapped and not r2.is_unmapped:
+                    ##reorient reads to ease duplicates removal
+                    or1, or2 = get_ordered_reads(r1, r2)
+                    or1_chrom = samfile.get_reference_name(or1.tid)
+                    or2_chrom = samfile.get_reference_name(or2.tid)
+
+                    ##reset as tag now that the reads are oriented
+                    r1as = get_read_tag(or1, gtag)
+                    r2as = get_read_tag(or2, gtag)
+                    if gtag is not None:
+                        htag = str(r1as) + "-" + str(r2as)
+
+                    ##get fragment name and reorient if necessary
+                    if or1 == r1 and or2 == r2:
+                        or1_resfrag = r1_resfrag
+                        or2_resfrag = r2_resfrag
+                    elif or1 == r2 and or2 == r1:
+                        or1_resfrag = r2_resfrag
+                        or2_resfrag = r1_resfrag
+
+                    if or1_resfrag is not None:
+                        or1_fragname = or1_resfrag.value["name"]
+                    else:
+                        or1_fragname = "None"
+
+                    if or2_resfrag is not None:
+                        or2_fragname = or2_resfrag.value["name"]
+                    else:
+                        or2_fragname = "None"
+
+                    cur_handler.write(
+                        or1.query_name
+                        + "\t"
+                        + or1_chrom
+                        + "\t"
+                        + str(get_read_pos(or1) + 1)
+                        + "\t"
+                        + str(get_read_strand(or1))
+                        + "\t"
+                        + or2_chrom
+                        + "\t"
+                        + str(get_read_pos(or2) + 1)
+                        + "\t"
+                        + str(get_read_strand(or2))
+                        + "\t"
+                        + str(dist)
+                        + "\t"
+                        + or1_fragname
+                        + "\t"
+                        + or2_fragname
+                        + "\t"
+                        + str(or1.mapping_quality)
+                        + "\t"
+                        + str(or2.mapping_quality)
+                        + "\t"
+                        + str(htag)
+                        + "\n"
+                    )
+
+                elif r2.is_unmapped and not r1.is_unmapped:
+                    if r1_resfrag is not None:
+                        r1_fragname = r1_resfrag.value["name"]
+
+                    cur_handler.write(
+                        r1.query_name
+                        + "\t"
+                        + r1_chrom
+                        + "\t"
+                        + str(get_read_pos(r1) + 1)
+                        + "\t"
+                        + str(get_read_strand(r1))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r1_fragname
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r1.mapping_quality)
+                        + "\t"
+                        + "*"
+                        + "\n"
+                    )
+                elif r1.is_unmapped and not r2.is_unmapped:
+                    if r2_resfrag is not None:
+                        r2_fragname = r2_resfrag.value["name"]
+
+                    cur_handler.write(
+                        r2.query_name
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_chrom
+                        + "\t"
+                        + str(get_read_pos(r2) + 1)
+                        + "\t"
+                        + str(get_read_strand(r2))
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + r2_fragname
+                        + "\t"
+                        + "*"
+                        + "\t"
+                        + str(r2.mapping_quality)
+                        + "\n"
+                    )
+
+                ## Keep initial order
+                if samOut:
+                    r1.tags = r1.tags + [("CT", str(interactionType))]
+                    r2.tags = r2.tags + [("CT", str(interactionType))]
+                    handle_sam.write(r1)
+                    handle_sam.write(r2)
+
+            if reads_counter % 100000 == 0 and verbose:
+                print("##", reads_counter)
+
+    # Close handler
+    handle_valid.close()
+    if allOutput:
+        handle_de.close()
+        handle_re.close()
+        handle_sc.close()
+        handle_dump.close()
+        handle_single.close()
+        handle_filt.close()
+
+    # Write stats file
+    handle_stat = open(outputDir + "/" + baseReadsFile + ".RSstat", "w")
+    handle_stat.write("## Hi-C processing\n")
+    handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+    handle_stat.write("Dangling_end_pairs\t" + str(de_counter) + "\n")
+    handle_stat.write("Religation_pairs\t" + str(re_counter) + "\n")
+    handle_stat.write("Self_Cycle_pairs\t" + str(sc_counter) + "\n")
+    handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
+    handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
+    handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
+
+    ## Write AS report
+    if gtag is not None:
+        handle_stat.write("## ======================================\n")
+        handle_stat.write("## Allele specific information\n")
+        handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
+        handle_stat.write(
+            "Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t"
+            + str(UG1_ascounter + G1U_ascounter)
+            + "\n"
+        )
+        handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
+        handle_stat.write(
+            "Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t"
+            + str(UG2_ascounter + G2U_ascounter)
+            + "\n"
+        )
+        handle_stat.write(
+            "Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter + G2G1_ascounter) + "\n"
+        )
+        handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
+
+    handle_stat.close()
+
+    if samOut:
+        samfile.close()
diff --git a/bin/mergeSAM.py b/bin/mergeSAM.py
new file mode 100755
index 0000000000000000000000000000000000000000..d670ec2c6a94b89035ab417b4146806099c2c5aa
--- /dev/null
+++ b/bin/mergeSAM.py
@@ -0,0 +1,369 @@
+#!/usr/bin/env python
+
+## HiC-Pro
+## Copyright (c) 2015 Institut Curie
+## Author(s): Nicolas Servant, Eric Viara
+## Contact: nicolas.servant@curie.fr
+## This software is distributed without any guarantee under the terms of the BSD-3 licence.
+## See the LICENCE file for details
+
+
+"""
+Script to pair 2 SAM/BAM files into one PE BAM
+- On 03/05/16 Ferhat made changes starting from ~/bin/HiC-Pro_2.7.2b/scripts/mergeSAM.py 
+to make singletons possible to be reported
+"""
+
+import getopt
+import sys
+import os
+import re
+import pysam
+
+
+def usage():
+    """Usage function"""
+    print("Usage : python mergeSAM.py")
+    print("-f/--forward <forward read mapped file>")
+    print("-r/--reverse <reverse read mapped file>")
+    print("[-o/--output] <Output file. Default is stdin>")
+    print("[-s/--single] <report singleton>")
+    print("[-m/--multi] <report multiple hits>")
+    print("[-q/--qual] <minimum reads mapping quality>")
+    print("[-t/--stat] <generate a stat file>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
+    return
+
+
+def get_args():
+    """Get argument"""
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "f:r:o:q:smtvh",
+            ["forward=", "reverse=", "output=", "qual=", "single", "multi", "stat", "verbose", "help"],
+        )
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    return opts
+
+
+def is_unique_bowtie2(read):
+    ret = False
+    if not read.is_unmapped and read.has_tag("AS"):
+        if read.has_tag("XS"):
+            primary = read.get_tag("AS")
+            secondary = read.get_tag("XS")
+            if primary > secondary:
+                ret = True
+        else:
+            ret = True
+    return ret
+
+
+## Remove everything after "/" or " " in read's name
+def get_read_name(read):
+    name = read.query_name
+    # return name.split("/",1)[0]
+    return re.split("/| ", name)[0]
+
+
+def sam_flag(read1, read2, hr1, hr2):
+    f1 = read1.flag
+    f2 = read2.flag
+
+    if r1.is_unmapped == False:
+        r1_chrom = hr1.get_reference_name(r1.reference_id)
+    else:
+        r1_chrom = "*"
+    if r2.is_unmapped == False:
+        r2_chrom = hr2.get_reference_name(r2.reference_id)
+    else:
+        r2_chrom = "*"
+
+    ##Relevant bitwise flags (flag in an 11-bit binary number)
+    ##1 The read is one of a pair
+    ##2 The alignment is one end of a proper paired-end alignment
+    ##4 The read has no reported alignments
+    ##8 The read is one of a pair and has no reported alignments
+    ##16 The alignment is to the reverse reference strand
+    ##32 The other mate in the paired-end alignment is aligned to the reverse reference strand
+    ##64 The read is the first (#1) mate in a pair
+    ##128 The read is the second (#2) mate in a pair
+
+    ##The reads were mapped as single-end data, so should expect flags of
+    ##0 (map to the '+' strand) or 16 (map to the '-' strand)
+    ##Output example: a paired-end read that aligns to the reverse strand
+    ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
+
+    if f1 & 0x4:
+        f1 = f1 | 0x8
+
+    if f2 & 0x4:
+        f2 = f2 | 0x8
+
+    if not (f1 & 0x4) and not (f2 & 0x4):
+        ##The flag should now indicate this is paired-end data
+        f1 = f1 | 0x1
+        f1 = f1 | 0x2
+        f2 = f2 | 0x1
+        f2 = f2 | 0x2
+
+    ##Indicate if the pair is on the reverse strand
+    if f1 & 0x10:
+        f2 = f2 | 0x20
+
+    if f2 & 0x10:
+        f1 = f1 | 0x20
+
+    ##Is this first or the second pair?
+    f1 = f1 | 0x40
+    f2 = f2 | 0x80
+
+    ##Insert the modified bitwise flags into the reads
+    read1.flag = f1
+    read2.flag = f2
+
+    ##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
+    # RNEXT
+    if r1_chrom == r2_chrom:
+        read1.next_reference_id = r1.reference_id
+        read2.next_reference_id = r1.reference_id
+    else:
+        read1.next_reference_id = r2.reference_id
+        read2.next_reference_id = r1.reference_id
+    # PNEXT
+    read1.next_reference_start = read2.reference_start
+    read2.next_reference_start = read1.reference_start
+
+    return (read1, read2)
+
+
+if __name__ == "__main__":
+    ## Read command line arguments
+    opts = get_args()
+    inputFile = None
+    outputFile = None
+    mapq = None
+    report_single = False
+    report_multi = False
+    verbose = False
+    stat = False
+    output = "-"
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-f", "--forward"):
+            R1file = arg
+        elif opt in ("-r", "--reverse"):
+            R2file = arg
+        elif opt in ("-o", "--output"):
+            output = arg
+        elif opt in ("-q", "--qual"):
+            mapq = arg
+        elif opt in ("-s", "--single"):
+            report_single = True
+        elif opt in ("-m", "--multi"):
+            report_multi = True
+        elif opt in ("-t", "--stat"):
+            stat = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
+
+    ## Verbose mode
+    if verbose:
+        print("## mergeBAM.py")
+        print("## forward=", R1file)
+        print("## reverse=", R2file)
+        print("## output=", output)
+        print("## min mapq=", mapq)
+        print("## report_single=", report_single)
+        print("## report_multi=", report_multi)
+        print("## verbose=", verbose)
+
+    ## Initialize variables
+    tot_pairs_counter = 0
+    multi_pairs_counter = 0
+    uniq_pairs_counter = 0
+    unmapped_pairs_counter = 0
+    lowq_pairs_counter = 0
+    multi_singles_counter = 0
+    uniq_singles_counter = 0
+    lowq_singles_counter = 0
+
+    # local_counter = 0
+    paired_reads_counter = 0
+    singleton_counter = 0
+    reads_counter = 0
+    r1 = None
+    r2 = None
+
+    ## Reads are 0-based too (for both SAM and BAM format)
+    ## Loop on all reads
+    if verbose:
+        print("## Merging forward and reverse tags ...")
+
+    with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2:
+        if output == "-":
+            outfile = pysam.AlignmentFile(output, "w", template=hr1)
+        else:
+            outfile = pysam.AlignmentFile(output, "wb", template=hr1)
+
+        for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
+            reads_counter += 1
+            if reads_counter % 1000000 == 0 and verbose:
+                print("##", reads_counter)
+
+            if get_read_name(r1) == get_read_name(r2):
+                ## both unmapped
+                if r1.is_unmapped == True and r2.is_unmapped == True:
+                    unmapped_pairs_counter += 1
+                    continue
+
+                ## both mapped
+                elif r1.is_unmapped == False and r2.is_unmapped == False:
+                    ## quality
+                    if mapq != None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
+                        lowq_pairs_counter += 1
+                        continue
+
+                    ## Unique mapping
+                    if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
+                        uniq_pairs_counter += 1
+                    else:
+                        multi_pairs_counter += 1
+                        if report_multi == False:
+                            continue
+
+                ## One mate maped
+                else:
+                    singleton_counter += 1
+                    if report_single == False:
+                        continue
+                    if r1.is_unmapped == False:  ## first end is mapped, second is not
+                        ## quality
+                        if mapq != None and (r1.mapping_quality < int(mapq)):
+                            lowq_singles_counter += 1
+                            continue
+                        ## Unique mapping
+                        if is_unique_bowtie2(r1) == True:
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if report_multi == False:
+                                continue
+                    else:  ## second end is mapped, first is not
+                        ## quality
+                        if mapq != None and (r2.mapping_quality < int(mapq)):
+                            lowq_singles_counter += 1
+                            continue
+                        ## Unique mapping
+                        if is_unique_bowtie2(r2) == True:
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if report_multi == False:
+                                continue
+
+                tot_pairs_counter += 1
+                (r1, r2) = sam_flag(r1, r2, hr1, hr2)
+
+                ## Write output
+                outfile.write(r1)
+                outfile.write(r2)
+
+            else:
+                print(
+                    "Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted."
+                )
+                sys.exit(1)
+
+        if stat:
+            if output == "-":
+                statfile = "pairing.stat"
+            else:
+                statfile = re.sub("\.bam$", ".pairstat", output)
+            with open(statfile, "w") as handle_stat:
+                handle_stat.write(
+                    "Total_pairs_processed\t"
+                    + str(reads_counter)
+                    + "\t"
+                    + str(round(float(reads_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unmapped_pairs\t"
+                    + str(unmapped_pairs_counter)
+                    + "\t"
+                    + str(round(float(unmapped_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Low_qual_pairs\t"
+                    + str(lowq_pairs_counter)
+                    + "\t"
+                    + str(round(float(lowq_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unique_paired_alignments\t"
+                    + str(uniq_pairs_counter)
+                    + "\t"
+                    + str(round(float(uniq_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Multiple_pairs_alignments\t"
+                    + str(multi_pairs_counter)
+                    + "\t"
+                    + str(round(float(multi_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Pairs_with_singleton\t"
+                    + str(singleton_counter)
+                    + "\t"
+                    + str(round(float(singleton_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Low_qual_singleton\t"
+                    + str(lowq_singles_counter)
+                    + "\t"
+                    + str(round(float(lowq_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Unique_singleton_alignments\t"
+                    + str(uniq_singles_counter)
+                    + "\t"
+                    + str(round(float(uniq_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Multiple_singleton_alignments\t"
+                    + str(multi_singles_counter)
+                    + "\t"
+                    + str(round(float(multi_singles_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+                handle_stat.write(
+                    "Reported_pairs\t"
+                    + str(tot_pairs_counter)
+                    + "\t"
+                    + str(round(float(tot_pairs_counter) / float(reads_counter) * 100, 3))
+                    + "\n"
+                )
+    hr1.close()
+    hr2.close()
+    outfile.close()
diff --git a/bin/merge_statfiles.py b/bin/merge_statfiles.py
new file mode 100755
index 0000000000000000000000000000000000000000..c3986e1e6534eef84c0d11a7e95ee608dc571de2
--- /dev/null
+++ b/bin/merge_statfiles.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+## nf-core-hic
+## Copyright (c) 2020 Institut Curie
+## Author(s): Nicolas Servant
+## Contact: nicolas.servant@curie.fr
+## This software is distributed without any guarantee under the terms of the BSD-3 licence.
+## See the LICENCE file for details
+
+"""
+Script to merge any files with the same template
+"""
+
+import argparse
+import sys
+import glob
+import os
+from collections import OrderedDict
+
+
+def num(s):
+    try:
+        return int(s)
+    except ValueError:
+        return float(s)
+
+
+# Main entry point: merge several tab-separated stat files that share the
+# same metric names (first column), summing integer columns and averaging
+# float columns, then print the merged table on stdout.
+if __name__ == "__main__":
+    ## Read command line arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-f", "--files", help="List of input file(s)", type=str, nargs="+")
+    parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
+    args = parser.parse_args()
+
+    infiles = args.files
+    # NOTE(review): if -f/--files is omitted, args.files is None and the
+    # len() call below raises TypeError -- consider required=True.
+    li = len(infiles)
+
+    if li > 0:
+        if args.verbose:
+            print("## merge_statfiles.py")
+            print("## Merging " + str(li) + " files")
+
+        ## Reading first file to get the template
+        # template maps each metric name (first column) to the list of its
+        # numeric values, preserving the line order of the first file.
+        template = OrderedDict()
+        if args.verbose:
+            print("## Use " + infiles[0] + " as template")
+        with open(infiles[0]) as f:
+            for line in f:
+                if not line.startswith("#"):
+                    lsp = line.strip().split("\t")
+                    data = map(num, lsp[1 : len(lsp)])
+                    template[str(lsp[0])] = list(data)
+
+        if len(template) == 0:
+            print("Cannot find template files !")
+            sys.exit(1)
+
+        ## Int are counts / Float are percentage
+        for fidx in list(range(1, li)):
+            with open(infiles[fidx]) as f:
+                for line in f:
+                    if not line.startswith("#"):
+                        lsp = line.strip().split("\t")
+                        if lsp[0] in template:
+                            for i in list(range(1, len(lsp))):
+                                if isinstance(num(lsp[i]), int):
+                                    # Counts are summed across files.
+                                    template[lsp[0]][i - 1] += num(lsp[i])
+                                else:
+                                    # Percentages are merged as a running
+                                    # pairwise mean. NOTE(review): with more
+                                    # than two files this weights later files
+                                    # more heavily than earlier ones --
+                                    # confirm this is intended.
+                                    template[lsp[0]][i - 1] = round((template[lsp[0]][i - 1] + num(lsp[i])) / 2, 3)
+                        else:
+                            # Metric present in a later file but not in the
+                            # template: warn on stderr and skip it.
+                            sys.stderr.write(
+                                "Warning : '" + lsp[0] + "' not found in template [" + infiles[fidx] + "]\n"
+                            )
+
+        ## Print template
+        # Re-emit the merged table on stdout, one tab-separated metric per line.
+        for x in template:
+            sys.stdout.write(x)
+            for y in template[x]:
+                sys.stdout.write("\t" + str(y))
+            sys.stdout.write("\n")
+
+    else:
+        print("No files to merge - stop")
+        sys.exit(1)
diff --git a/bin/src/build_matrix.cpp b/bin/src/build_matrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e366d5b7649d3f9eb040a80eee5a5d10572f6593
--- /dev/null
+++ b/bin/src/build_matrix.cpp
@@ -0,0 +1,1037 @@
+// HiC-Pro
+// Copyright 2015 Institut Curie                               
+// Author(s): Eric Viara
+// Contact: nicolas.servant@curie.fr
+// This software is distributed without any guarantee under the terms of the BSD-3 License
+
+#include <iostream>
+#include <iomanip>
+#include <fstream>
+#include <sstream>
+#include <unordered_map>
+#include <map>
+#include <vector>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+
// Output-format bit flags (combined in enum Format below).
static const int SPARSE_FMT = 0x1;
static const int BED_FMT = 0x2;
static const char* prog;            // argv[0], used as prefix in error messages
static bool progress = false;       // --progress: emit progress marks on stderr
static bool detail_progress = false; // --detail-progress: percentage display (needs a line-count pass)
static bool quiet = false;          // --quiet: suppress per-position warnings

// Debug escape hatch: setting the NO_DICHO env var disables the dichotomic
// interval search in favor of a linear scan.
static bool NO_DICHO = getenv("NO_DICHO") != NULL;

// Genomic coordinate / bin index type.
typedef unsigned int chrsize_t;

const std::string VERSION = "1.2 [2015-10-20]";

// Sentinel returned by assign_bin when a position falls in no bin.
const static chrsize_t BIN_NOT_FOUND = (chrsize_t)-1;

class AxisChromosome;
+
// Return true when `buffer` is empty or contains only whitespace
// (spaces, tabs, newlines); false as soon as any other character is seen.
static bool is_empty_line(const char* buffer)
{
  while (char c = *buffer++) {
    // Bug fix: the original tested `c != ' ' || c != '\n' || c != '\t'`,
    // which is true for every character (no char equals all three at once),
    // so any non-empty line -- including all-blank lines -- was reported
    // as non-empty.
    if (c != ' ' && c != '\n' && c != '\t') {
      return false;
    }
  }
  return true;
}
+
+static int bed_line_parse(char* buffer, char chr[], chrsize_t& start, chrsize_t& end, const std::string& bedfile, size_t line_num)
+{
+  if (sscanf(buffer, "%s %u %u", chr, &start, &end) != 3) {
+    std::cerr << "bed file \"" << bedfile << "\" at line #" << line_num << " format error\n";
+    return 1;
+  }
+  return 0;
+}
+
// One genomic segment read from a bed file: `start` is the 0-based begin,
// `end` the 1-based end (bed convention). NOTE(review): lookups elsewhere
// test positions as start+1 <= pos <= end -- confirm intent.
struct Interval {
  chrsize_t start;
  chrsize_t end;

  Interval(chrsize_t start = 0, chrsize_t end = 0) : start(start), end(end) { }
};
+ 
+class ChrRegions {
+
+  std::vector<std::string> chr_v;
+  std::map<std::string, std::vector<Interval>* > intervals;
+
+public:
+  ChrRegions() { }
+
+  int readBedfile(const std::string& bedfile) {
+    std::ifstream ifs(bedfile.c_str());
+    if (ifs.bad() || ifs.fail()) {
+      std::cerr << prog << " cannot open bed file: " << bedfile << " for reading\n";
+      return 1;
+    }
+    char buffer[4096];
+    size_t line_num = 0;
+    chrsize_t lastend = 0;
+    char lastchr[2048] = {0};
+    while (!ifs.eof()) {
+      ifs.getline(buffer, sizeof(buffer)-1);
+      line_num++;
+      if (is_empty_line(buffer)) {
+	continue;
+      }
+      chrsize_t start = 0;
+      chrsize_t end = 0;
+      char chr[2048];
+      if (bed_line_parse(buffer, chr, start, end, bedfile, line_num)) {
+	return 1;
+      }
+      if (intervals.find(chr) == intervals.end()) {
+	intervals[chr] = new std::vector<Interval>();
+	chr_v.push_back(chr);
+      }
+      /*
+      if (lastend != 0 && !strcmp(lastchr, chr) && start != lastend) {
+	std::cerr << "warning: discontinuous segment for chromosome " << chr << " at position " << start << " " << end << std::endl;
+      }
+      */
+      if (*lastchr && strcmp(lastchr, chr)) {
+	lastend = 0;
+      }
+
+      if (lastend != 0 && start < lastend) {
+	std::cerr << "error: bedfile not sorted at line #" << line_num << std::endl;
+	exit(1);
+      }
+      strcpy(lastchr, chr);
+      lastend = end;
+      intervals[chr]->push_back(Interval(start, end));
+      if (progress && (line_num % 100000) == 0) {
+	std::cerr << '.' << std::flush;
+      }
+    }
+    if (progress) {
+      std::cerr << std::endl;
+    }
+    return 0;
+  }
+
+  void displayBed(std::ostream& ofs, const std::vector<AxisChromosome*>& axis_chr) const {
+    std::vector<std::string>::const_iterator begin = chr_v.begin();
+    std::vector<std::string>::const_iterator end = chr_v.end();
+    unsigned int num = 1;
+    while (begin != end) {
+      const std::string& chrname = *begin;
+      std::map<std::string, std::vector<Interval>* >::const_iterator iter = intervals.find(chrname);
+      assert(iter != intervals.end());
+      const std::vector<Interval>* itv_vect = (*iter).second;
+      std::vector<Interval>::const_iterator itv_begin = itv_vect->begin();
+      std::vector<Interval>::const_iterator itv_end = itv_vect->end();
+      while (itv_begin != itv_end) {
+	const Interval& itv = (*itv_begin);
+	ofs << chrname << '\t' << itv.start << '\t' << itv.end << '\t' << num << '\n';
+	if (progress && (num % 100000) == 0) {
+	  std::cerr << '.' << std::flush;
+	}
+	num++;
+	++itv_begin;
+      }
+      ++begin;
+    }
+    if (progress) {
+      std::cerr << std::endl;
+    }
+  }
+
+  const std::vector<Interval>* getIntervalsFromChr(const std::string& chr) const {
+    std::map<std::string, std::vector<Interval>* >::const_iterator iter = intervals.find(chr);
+    if (iter != intervals.end()) {
+      return (*iter).second;
+    }
+    return NULL;
+  }
+};
+
// Binary search over an interval list; find() returns the index of the
// interval containing a given position, or -1.
class Dichotomic {

  int min, max;                           // inclusive index bounds of the search
  const std::vector<Interval>& intervals; // NOTE(review): assumed sorted and
                                          // non-overlapping -- confirm the
                                          // bedfile sort check guarantees this

public:
  Dichotomic(const std::vector<Interval>& intervals) : intervals(intervals) {
    //min = middle(intervals[0]);
    //max = middle(intervals[intervals.size()-1]);
    min = 0;
    max = intervals.size()-1;
  }

  // Midpoint of an interval in 1-based coordinates; used to pick which
  // half to keep when the probed value is not inside intervals[n].
  static chrsize_t middle(const Interval& itv) {
    return (itv.start+1 + itv.end) / 2;
  }

  // Locate the interval containing `value` (1-based: start+1 <= value <= end).
  // Returns its index, or -1 when the position falls in no interval
  // (e.g. a gap between discontinuous bed segments).
  int find(chrsize_t value) {
    int l = min;
    int r = max;
    int n = 0;
    while (l <= r) {
      n = (l + r) >> 1;
      const Interval& itv = intervals[n];
      if (value >= itv.start+1 && value <= itv.end) {
	return n;
      }

      // signed comparison against the midpoint decides the direction
      int x = middle(itv) - value;

      if (x < 0) {
	l = n + 1;
      } else {
	r = n - 1;
      }
      //std::cout << "l: " << l << '\n';
      //std::cout << "r: " << r << '\n';
    }

    return -1;
  }
};
+
+class Chromosome {
+
+private:
+  static std::unordered_map<std::string, Chromosome*> chr_map;
+
+  void computeSizes(chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions);
+
+  std::string name;
+
+  chrsize_t chrsize;
+
+  chrsize_t binsize;
+  chrsize_t stepsize;
+  chrsize_t bincount;
+
+  const ChrRegions* chr_regions;
+
+public:
+  Chromosome(const std::string& name, chrsize_t chrsize, chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions) : name(name), chrsize(chrsize), chr_regions(chr_regions) {
+    computeSizes(ori_binsize, step, binadjust, chr_regions);
+    assert(chr_map.find(name) == chr_map.end());
+    chr_map[name] = this;
+  }
+
+  void adjustBinsize(chrsize_t ori_binsize, const chrsize_t step);
+
+  const std::string& getName() const {return name;}
+  chrsize_t getChrsize() const {return chrsize;}
+  chrsize_t getBinsize() const {return binsize;}
+  chrsize_t getStepsize() const {return stepsize;}
+  chrsize_t getBincount() const {return bincount;}
+
+  const ChrRegions* getChrRegions() const {return chr_regions;}
+
+  static chrsize_t getCount() {
+    return chr_map.size();
+  }
+
+  static Chromosome* getByName(const std::string& name) {
+    return chr_map[name];
+  }
+};
+
// A chromosome as laid out on one axis (abscissa or ordinate) of the
// matrix: wraps a Chromosome plus the absolute bin range it occupies.
class AxisChromosome {
  int idx; // really needed ?
  const Chromosome* chr;
  chrsize_t binstart; // absolute index of this chromosome's first bin
  chrsize_t binend;   // one past this chromosome's last absolute bin

public:
  // Chains the axis: this chromosome starts where `lastAxisChr` ended,
  // or at `binoffset` for the first chromosome on the axis.
  AxisChromosome(int binoffset, const Chromosome* chr, const AxisChromosome* lastAxisChr) : chr(chr) {
    if (lastAxisChr != NULL) {
      binstart = lastAxisChr->getBinend();
    } else {
      binstart = binoffset;
    }
    binend = binstart + chr->getBincount();
    /*
    if (verbose) {
      std::cerr << "AxisChromosome: " << chr->getName() << " " << binstart << " " << binend << " " << chr->getBincount() << std::endl;
    }
    */
  }

  chrsize_t getBinstart() const {return binstart;}
  chrsize_t getBinend() const {return binend;}
  chrsize_t getChrsize() const {return chr->getChrsize();}
  chrsize_t getBinsize() const {return chr->getBinsize();}
  chrsize_t getStepsize() const {return chr->getStepsize();}
  chrsize_t getBincount() const {return chr->getBincount();}

  const Chromosome* getChromosome() const {return chr;}

  // Map a genomic position `start` to its absolute bin index on this axis,
  // or BIN_NOT_FOUND when no bed interval contains it. `org` is unused.
  chrsize_t assign_bin(const std::string& org, chrsize_t start) const {
    const ChrRegions* chr_regions = chr->getChrRegions();
    if (chr_regions != NULL) {
      // bed-defined bins: locate the interval containing `start`
      const std::vector<Interval>* intervals = chr_regions->getIntervalsFromChr(chr->getName());
      assert(intervals != NULL);

      if (!NO_DICHO) {
	// default path: dichotomic search over the intervals
	Dichotomic dicho(*intervals);
	int where = dicho.find(start);
	if (where < 0) {
	  if (!quiet) {
	    std::cerr << "warning: no bin at position " << chr->getName() << ":" << start << std::endl;
	  }
	  return BIN_NOT_FOUND;
	}
	return where + getBinstart();
      }

      // linear fallback (NO_DICHO env var). NOTE(review): unlike the
      // dichotomic branch, a position contained in no interval yields an
      // out-of-range bin index instead of BIN_NOT_FOUND -- confirm this
      // path is debug-only.
      std::vector<Interval>::const_iterator begin = intervals->begin();
      std::vector<Interval>::const_iterator end = intervals->end();

      chrsize_t binidx = 1;
      while (begin != end) {
	const Interval& itv = *begin;
	if (start >= itv.start+1 && start <= itv.end) {
	  break;
	}
	++binidx;
	++begin;
      }

      return binidx + getBinstart() - 1;
    }

    // fixed-size bins: arithmetic mapping through bin/step sizes
    int loc = (int)start;
    int binsize = getBinsize();
    int stepsize = getStepsize();
    int cur_binidx = 1 + ceil((double)(loc-binsize)/stepsize);
    // cur_binbeg/cur_binend delimit the bin; computed (and clamped to the
    // chromosome end) but not used for the returned index
    int cur_binbeg = stepsize * (cur_binidx-1)+1;
    int cur_binend = cur_binbeg + binsize-1;
    int chrsize = getChrsize();
    if (cur_binend > chrsize) {
      cur_binend = chrsize;
    }
    return cur_binidx + getBinstart() - 1;
  }
};
+
+class Matrix {
+
+  std::vector<AxisChromosome*> axis_chr_abs;
+  std::vector<AxisChromosome*> axis_chr_ord;
+  std::unordered_map<std::string, AxisChromosome*> axis_chr_abs_map;
+  std::unordered_map<std::string, AxisChromosome*> axis_chr_ord_map;
+
+  std::map<chrsize_t, std::map<chrsize_t, chrsize_t> > mat;
+
+  void addAxisChromosome(const std::vector<const Chromosome*>& chr_v, std::vector<AxisChromosome*>& axis_chr, std::unordered_map<std::string, AxisChromosome*>& axis_chr_map);
+
+  const AxisChromosome* getAxisChromosome(const std::string& chrname, const std::unordered_map<std::string, AxisChromosome*>& axis_chr_map) const {
+    std::unordered_map<std::string, AxisChromosome*>::const_iterator iter = axis_chr_map.find(chrname);
+    if (iter == axis_chr_map.end()) {
+      return NULL;
+    }
+    return (*iter).second;
+  }
+
+  void displayBed(std::ostream& ofs, const std::vector<AxisChromosome*>& axis_chr) const {
+    std::vector<AxisChromosome*>::const_iterator begin = axis_chr.begin();
+    std::vector<AxisChromosome*>::const_iterator end = axis_chr.end();
+    while (begin != end) {
+      const AxisChromosome* axis_chr = *begin;
+      const std::string& name = axis_chr->getChromosome()->getName();
+      chrsize_t binstart = axis_chr->getBinstart();
+      chrsize_t binend = axis_chr->getBinend();
+      chrsize_t binsize = axis_chr->getBinsize();
+      chrsize_t chrsize = axis_chr->getChrsize();
+      binend -= binstart;
+      for (chrsize_t bin = 0; bin < binend; ++bin) {
+	// bed are 0-based begin, 1-based end
+	chrsize_t beg = bin * binsize;
+	chrsize_t end = beg + binsize - 1;
+	if (end > chrsize) {
+	  end = chrsize-1;
+	}
+	ofs << name << '\t' << beg << '\t' << (end+1) << '\t' << (bin+binstart) << '\n';
+      }
+      ++begin;
+    }
+  }
+
+  int binoffset;
+
+public:
+  Matrix(int binoffset) : binoffset(binoffset) {}
+
+  void addXAxisChromosome(const std::vector<const Chromosome*>& chr_v);
+  void addYAxisChromosome(const std::vector<const Chromosome*>& chr_v);
+
+  const AxisChromosome* getXAxisChromosome(const std::string& chrname) const {
+    return getAxisChromosome(chrname, axis_chr_abs_map);
+  }
+
+  const AxisChromosome* getYAxisChromosome(const std::string& chrname) const {
+    return getAxisChromosome(chrname, axis_chr_ord_map);
+  }
+
+  void add(chrsize_t abs_bin, chrsize_t ord_bin) {
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::iterator iter = mat.find(abs_bin);
+    if (iter == mat.end()) {
+      mat[abs_bin] = std::map<chrsize_t, chrsize_t>();
+      mat[abs_bin][ord_bin] = 1;
+    } else {
+      (*iter).second[ord_bin]++;
+    }
+  }
+
+  void displayMatrix(std::ostream& ofs) const {
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::const_iterator begin = mat.begin();
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::const_iterator end = mat.end();
+    size_t line_total = 0;
+    if (progress) {
+      while (begin != end) {
+	const std::map<chrsize_t, chrsize_t>& line = (*begin).second;
+	line_total += line.size();
+	++begin;
+      }
+      begin = mat.begin();
+    }
+
+    size_t line_cnt = 1;
+    if (progress) {
+      std::cerr << "\n=================\n";
+      std::cerr << " Dumping matrix\n";
+      std::cerr << "=================\n\n";
+    }
+    size_t modulo = line_total / 1000;
+    while (begin != end) {
+      chrsize_t abs = (*begin).first;
+      const std::map<chrsize_t, chrsize_t>& line = (*begin).second;
+      std::map<chrsize_t, chrsize_t>::const_iterator bb = line.begin();
+      std::map<chrsize_t, chrsize_t>::const_iterator ee = line.end();
+      while (bb != ee) {
+	if (progress && (line_cnt % modulo) == 0) {
+	  double percent = (double(line_cnt)/line_total)*100;
+	  std::cerr << "" << percent << "% " << line_cnt << " / " << line_total << std::endl;
+	}
+	ofs << abs << '\t' << (*bb).first << '\t' << (*bb).second << '\n';
+	line_cnt++;
+	++bb;
+      }
+      ++begin;
+    }
+  }
+
+  void displayXBed(std::ostream& ofs) const {
+    displayBed(ofs, axis_chr_abs);
+  }
+
+  void displayYBed(std::ostream& ofs) const {
+    displayBed(ofs, axis_chr_ord);
+  }
+
+  const std::vector<AxisChromosome*>& getXAxisChromosomes() {return axis_chr_abs;}
+  const std::vector<AxisChromosome*>& getYAxisChromosomes() {return axis_chr_ord;}
+};
+
+void Matrix::addAxisChromosome(const std::vector<const Chromosome*>& chr_v, std::vector<AxisChromosome*>& axis_chr, std::unordered_map<std::string, AxisChromosome*>& axis_chr_map)
+{
+  std::vector<const Chromosome*>::const_iterator begin = chr_v.begin();
+  std::vector<const Chromosome*>::const_iterator end = chr_v.end();
+
+  const AxisChromosome* lastAxisChr = NULL;
+  while (begin != end) {
+    const Chromosome* chr = *begin;
+    AxisChromosome* axisChr = new AxisChromosome(binoffset, chr, lastAxisChr);
+    axis_chr.push_back(axisChr);
+    axis_chr_map[chr->getName()] = axisChr;
+    lastAxisChr = axisChr;
+    ++begin;
+  }
+}
+
// Register the abscissa (X) chromosomes of the matrix.
void Matrix::addXAxisChromosome(const std::vector<const Chromosome*>& chr_v)
{
  addAxisChromosome(chr_v, axis_chr_abs, axis_chr_abs_map);
}

// Register the ordinate (Y) chromosomes of the matrix.
void Matrix::addYAxisChromosome(const std::vector<const Chromosome*>& chr_v)
{
  addAxisChromosome(chr_v, axis_chr_ord, axis_chr_ord_map);
}
+
// Definition of the global chromosome registry declared in Chromosome.
std::unordered_map<std::string, Chromosome*> Chromosome::chr_map;

// Output-format flags built from the SPARSE_FMT/BED_FMT bits.
enum Format {
  SPARSE_IND_FMT = SPARSE_FMT,
  SPARSE_BED_FMT = SPARSE_FMT|BED_FMT,
  EXPANDED_FMT = 0x4
};
+
// Recompute bincount/binsize/stepsize so the bins tile the chromosome
// (--binadjust mode).
// NOTE(review): `ori_binsize/step` is integer division, so for step > 1
// the divisor is truncated before the floating-point divide -- confirm
// this rounding is intended.
void Chromosome::adjustBinsize(chrsize_t ori_binsize, const chrsize_t step)
{
  bincount = 1 + (chrsize_t)floor( (double)(chrsize-ori_binsize) / (ori_binsize/step));
  binsize = chrsize / bincount;
  stepsize = binsize / step;
}
+
// Derive binsize/stepsize/bincount for this chromosome. With a bed-defined
// binning (`chr_regions` non-NULL) the bin count is simply the number of
// intervals for this chromosome; otherwise bins are fixed windows of
// `ori_binsize` sliding by ori_binsize/step.
void Chromosome::computeSizes(chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions)
{
  if (NULL != chr_regions) {
    const std::vector<Interval>* intervals = chr_regions->getIntervalsFromChr(name);
    assert(intervals != NULL);
    bincount = intervals->size();
    /*
    if (verbose) {
      std::cerr << name << " bincount: " << bincount << std::endl;
    }
    */
  } else {
    if (chrsize < ori_binsize) {
      // chromosome shorter than one bin: a single bin covers it all
      // (also avoids unsigned underflow in the subtraction below)
      binsize = chrsize;
      stepsize = chrsize;
      bincount = 1;
    } else if (binadjust) {
      adjustBinsize(ori_binsize, step);
    } else {
      binsize = ori_binsize;
      stepsize = (chrsize_t)floor(ori_binsize/step);
      chrsize_t remainder = (chrsize - ori_binsize) % stepsize;
      // NOTE(review): floor() is applied to the integer difference before
      // the division; the truncated result matches plain integer division
      // here, but the parenthesization looks unintended -- confirm.
      chrsize_t tmp_bincount = 1 + (chrsize_t)floor(chrsize-ori_binsize)/stepsize;
      bincount = remainder > 0 ? tmp_bincount+1 : tmp_bincount;
    }
    /*
    if (verbose) {
      std::cerr << name << " sizes: " << chrsize << " " << binsize << " " << stepsize << " " << bincount << std::endl;
    }
    */
  }
}
+
+static int usage(int ret = 1)
+{
+  std::cerr << "\nusage: " << prog << " --binsize BINSIZE|--binfile --chrsizes FILE --ifile FILE\n";
+  std::cerr << "       --oprefix PREFIX [--binadjust] [--step STEP] [--binoffset OFFSET]\n";
+  std::cerr << "       [--matrix-format asis|upper|lower|complete][--chrA CHR... --chrB CHR...] [--quiet] [--progress] [--detail-progress]\n";
+  std::cerr << "\nusage: " << prog << " --version\n";
+  std::cerr << "\nusage: " << prog << " --help\n";
+  return ret;
+}
+
+static int help()
+{
+  (void)usage();
+  std::cerr << "\nOPTIONS\n\n";
+  std::cerr << "  --version              : display version\n";
+  std::cerr << "  --binsize BINSIZE      : bin size\n";
+  std::cerr << "  --binfile BEDFILE      : bed file containing bins (chr start end)\n";
+  std::cerr << "  --chrsizes FILE        : file containing chromosome sizes\n";
+  std::cerr << "  --ifile FILE           : input interaction file\n";
+  std::cerr << "  --oprefix PREFIX       : output prefix of generated files (matrix and bed)\n";
+  std::cerr << "  --binadjust            : [optional] adjust bin sizes, default is false\n";
+  std::cerr << "  --step STEP            : [optional] step size, default is 1\n";
+  std::cerr << "  --binoffset OFFSET     : [optional] starting bin offset, default is 1\n";
+  std::cerr << "  --matrix-format FORMAT : [optional] FORMAT may be:\n";
+  std::cerr << "                           - asis: matrix is generated according to input data (default)\n";
+  std::cerr << "                           - upper: only the upper matrix is generated\n";
+  std::cerr << "                           - lower: only the lower matrix is generated\n";
+  std::cerr << "                           - complete: generate both parts of the matrix (upper and lower);\n";
+  std::cerr << "                             input data must contain only one part (upper or lower) \n";
+  std::cerr << "  --chrA CHR             : [optional] colon separated list of abscissa chromosomes; default is all chromosomes\n";
+  std::cerr << "  --chrB CHR             : [optional] colon separated list of ordinate chromosomes; default is all chromosomes\n";
+  std::cerr << "  --quiet                : do not display any warning\n";
+  std::cerr << "  --progress             : display progress\n";
+  std::cerr << "  --detail-progress      : display detail progress (needs preliminary steps consuming time)\n";
+  return -1;
+}
+
// How interactions are folded into the matrix (see --matrix-format).
enum MatrixFormat {
  ASIS_MATRIX = 1,   // store (abs, ord) exactly as read
  UPPER_MATRIX,      // fold every interaction above the diagonal
  LOWER_MATRIX,      // fold every interaction below the diagonal
  COMPLETE_MATRIX    // mirror each entry to both halves
};
+  
+static int get_options(int argc, char* argv[], chrsize_t& binsize, const char*& binfile, const char*& chrsize_file, const char*& ifile, const char*& oprefix, Format& format, std::string& bed_prefix, bool& binadjust, MatrixFormat& matrix_format, chrsize_t& step, bool& whole_genome, int& binoffset, const char*& chrA, const char*& chrB)
+{
+  prog = argv[0];
+  for (int ac = 1; ac < argc; ++ac) {
+    const char* opt = argv[ac];
+    if (*opt == '-') {
+      if (!strcmp(opt, "--binadjust")) {
+	binadjust = true;
+      } else if (!strcmp(opt, "--version")) {
+	std::cout << "build_matrix version " << VERSION << "\n";
+	exit(0);
+      } else if (!strcmp(opt, "--progress")) {
+	progress = true;
+      } else if (!strcmp(opt, "--quiet")) {
+	quiet = true;
+      } else if (!strcmp(opt, "--detail-progress")) {
+	progress = true;
+	detail_progress = true;
+      } else if (!strcmp(opt, "--matrix-format")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	std::string matrix_format_str = argv[++ac];
+	if (matrix_format_str == "asis") {
+	  matrix_format = ASIS_MATRIX;
+	} else if (matrix_format_str == "upper") {
+	  matrix_format = UPPER_MATRIX;
+	} else if (matrix_format_str == "lower") {
+	  matrix_format = LOWER_MATRIX;
+	} else if (matrix_format_str == "complete") {
+	  matrix_format = COMPLETE_MATRIX;
+	} else {
+	  return usage();
+	}
+      } else if (!strcmp(opt, "--step")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	step = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--binfile")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binfile = argv[++ac];
+      } else if (!strcmp(opt, "--binsize")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binsize = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--binoffset")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binoffset = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--ifile")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	ifile = argv[++ac];
+      } else if (!strcmp(opt, "--oprefix")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	oprefix = argv[++ac];
+      } else if (!strcmp(opt, "--chrsizes")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrsize_file = argv[++ac];
+      } else if (!strcmp(opt, "--chrA")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrA = argv[++ac];
+	whole_genome = false;
+      } else if (!strcmp(opt, "--chrB")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrB = argv[++ac];
+	whole_genome = false;
+      } else if (!strcmp(opt, "--help")) {
+	return help();
+      } else {
+	std::cerr << '\n' << prog << ": unknown option " << opt << std::endl;
+	return usage();
+      }
+    }
+  }
+
+  return 0;
+}
+
+static void split_in_vect(const std::string& str, std::vector<const Chromosome*>& vect)
+{
+  size_t last_pos = 0;
+  while (size_t pos = str.find(':', last_pos)) {
+    std::string chrname;
+    bool last = pos == std::string::npos;
+    if (last) {
+      chrname = str.substr(last_pos);
+    } else {
+      chrname = str.substr(last_pos, pos-last_pos);
+    }
+    const Chromosome* chr = Chromosome::getByName(chrname);
+    if (!chr) {
+      std::cerr << prog << ": unknown chromosome " << chrname << std::endl;
+      exit(1);
+    }
+    vect.push_back(chr);
+    if (last) {
+      break;
+    }
+    last_pos = pos+1;
+  }
+}
+
// Parse one tab-separated interaction line in place, NUL-terminating the
// fields it extracts. Expected column layout:
//   <field1> TAB <lchr> TAB <lstart> TAB <field4> TAB <rchr> TAB <rstart> TAB ...
// Columns 1 and 4 are skipped (presumably read name and strand -- confirm
// against the producer of the input file). NOTE(review): a tab must follow
// the rstart column, otherwise rstart keeps the caller's pre-initialized
// value. Always returns 0; malformed lines are not reported.
static int interaction_parse(char* buffer, char*& lchr, chrsize_t& lstart, char*& rchr, chrsize_t& rstart)
{
  char c;
  char* str;
  // skip column 1; lchr points just past the first tab
  while ((c = *buffer++) != 0) {
    if (c == '\t') {
      lchr = buffer;
      break;
    }
  }
  // NUL-terminate lchr; str points at the lstart digits
  while ((c = *buffer) != 0) {
    if (c == '\t') {
      *buffer++ = 0;
      str = buffer;
      break;
    }
    buffer++;
  }

  // NUL-terminate the lstart column and convert it
  while ((c = *buffer) != 0) {
    if (c == '\t') {
      *buffer++ = 0;
      lstart = atoi(str);
      break;
    }
    buffer++;
  }

  // skip column 4; rchr points just past its tab
  while ((c = *buffer++) != 0) {
    if (c == '\t') {
      rchr = buffer;
      break;
    }
  }

  // NUL-terminate rchr; str points at the rstart digits
  while ((c = *buffer) != 0) {
    if (c == '\t') {
      *buffer++ = 0;
      str = buffer;
      break;
    }
    buffer++;
  }

  // NUL-terminate the rstart column and convert it
  while ((c = *buffer) != 0) {
    if (c == '\t') {
      *buffer++ = 0;
      rstart = atoi(str);
      break;
    }
    buffer++;
  }

  return 0;
}
+
+static char p_buffer[512000];
+
+static int build_matrix_init(Matrix& matrix, const char* ifile, std::ifstream& ifs, const std::string& oprefix, std::ofstream& matfs, std::ofstream& xbedfs, std::ofstream& ybedfs, const char* chrsize_file, bool whole_genome, const char* chrA, const char* chrB, chrsize_t ori_binsize, const char* binfile, chrsize_t step, bool binadjust, ChrRegions*& chr_regions, size_t& line_total)
+{
+  ifs.open(ifile);
+  if (ifs.bad() || ifs.fail()) {
+    std::cerr << prog << " cannot open interaction file: " << ifile << " for reading\n";
+    return 1;
+  }
+
+  if (detail_progress) {
+    if (progress) {
+      std::cerr << "\n======================================\n";
+      std::cerr << " Getting information for progress bar\n";
+      std::cerr << "======================================\n\n";
+    }
+    std::cerr << std::setprecision(2) << std::fixed;
+    int fd = open(ifile, O_RDONLY);
+    struct stat st;
+    assert(fstat(fd, &st) == 0);
+    assert(fd >= 0);
+    int nn;
+    int cnt = 1;
+    while ((nn = read(fd, p_buffer, sizeof(p_buffer))) > 0) {
+      const char *p = p_buffer;
+      while (nn-- > 0) {
+	if (*p++ == '\n') {
+	  line_total++;
+	}
+      }
+      if ((cnt % 200) == 0) {
+	std::cerr << '.' << std::flush;
+      }
+      cnt++;
+    }
+    std::cerr << std::endl;
+    close(fd);
+  }
+  
+  std::ifstream chrsizefs;
+  chrsizefs.open(chrsize_file);
+  if (chrsizefs.bad() || chrsizefs.fail()) {
+    std::cerr << prog << " cannot open chrsizes file: " << chrsize_file << " for reading\n";
+    return 1;
+  }
+
+  std::string matfile = oprefix + ".matrix";
+  matfs.open(matfile);
+  if (matfs.bad() || matfs.fail()) {
+    std::cerr << prog << " cannot open file: " << matfile << " for writing\n";
+    return 1;
+  }
+
+  std::string xbedfile = oprefix + "_abs.bed";
+  xbedfs.open(xbedfile);
+  if (xbedfs.bad() || xbedfs.fail()) {
+    std::cerr << prog << " cannot open file: " << xbedfile << " for writing\n";
+    return 1;
+  }
+
+  std::string ybedfile = oprefix + "_ord.bed";
+  if (!whole_genome) {
+    //std::string xbedlink;
+    //size_t pos = xbedfile.rfind('/');
+    //if (pos != std::string::npos) {
+    //  xbedlink = xbedfile.substr(pos+1);
+    //} else {
+    //  xbedlink = xbedfile;
+    //}
+    //unlink(ybedfile.c_str());
+    //if (symlink(xbedlink.c_str(), ybedfile.c_str())) {
+    //  std::cerr << prog << " cannot created link: " << ybedfile << "\n";
+    //  return 1;
+    //}
+    //} else {
+    ybedfs.open(ybedfile);
+    if (ybedfs.bad() || ybedfs.fail()) {
+      std::cerr << prog << " cannot open file: " << ybedfile << " for writing\n";
+      return 1;
+    }
+  }
+
+  chr_regions = NULL;
+  if (NULL != binfile) {
+    chr_regions = new ChrRegions();
+    if (progress) {
+      std::cerr << "\n=================\n";
+      std::cerr << " Reading binfile\n";
+      std::cerr << "=================\n\n";
+    }
+    if (chr_regions->readBedfile(binfile)) {
+      return 1;
+    }
+  }
+
+  std::vector<const Chromosome*> all_chr_v;
+  while (!chrsizefs.eof()) {
+    std::string buffer;
+    getline(chrsizefs, buffer);
+
+    chrsize_t chrsize;
+    std::istringstream istr(buffer);
+    std::string name;
+    istr >> name >> chrsize;
+    if (!istr.fail()) {
+      Chromosome* chromosome = new Chromosome(name, chrsize, ori_binsize, step, binadjust, chr_regions);
+      all_chr_v.push_back(chromosome);
+    }
+  }
+
+  chrsizefs.close();
+
+  if (chrA) {
+    assert(chrB != NULL);
+    std::vector<const Chromosome*> chrA_v;
+    std::vector<const Chromosome*> chrB_v;
+    split_in_vect(chrA, chrA_v);
+    split_in_vect(chrB, chrB_v);
+    matrix.addXAxisChromosome(chrA_v);
+    matrix.addYAxisChromosome(chrB_v);
+  } else {
+    matrix.addXAxisChromosome(all_chr_v);
+    matrix.addYAxisChromosome(all_chr_v);
+  }
+
+  return 0;
+}
+
// Main driver: stream the interaction file, map both ends of each
// interaction to absolute bins, accumulate them into the matrix according
// to `matrix_format`, then dump the bed files and the sparse matrix.
// `_dummy_format` and `_dummy_bed_prefix` are accepted but unused.
// Returns 0 on success, non-zero when initialization fails.
static int build_matrix(int binoffset, chrsize_t ori_binsize, const char* binfile, const char* chrsize_file, const char* ifile, const char* oprefix, Format _dummy_format, const std::string& _dummy_bed_prefix, bool binadjust, MatrixFormat matrix_format, chrsize_t step, bool whole_genome, const char* chrA, const char* chrB)
{
  std::ifstream ifs;
  std::ofstream matfs, xbedfs, ybedfs;

  Matrix matrix(binoffset);
  ChrRegions *chr_regions = NULL;
  size_t line_total = 0;
  if (int ret = build_matrix_init(matrix, ifile, ifs, oprefix, matfs, xbedfs, ybedfs, chrsize_file, whole_genome, chrA, chrB, ori_binsize, binfile, step, binadjust, chr_regions, line_total)) {
    return ret;
  }

  if (progress) {
    std::cerr << "\n=================\n";
    std::cerr << " Building matrix\n";
    std::cerr << "=================\n\n";
  }
  size_t line_cnt = 1;
  size_t line_num = 0;
  char buffer[4096];
  // lorg/rorg are passed to assign_bin, which ignores them; lmark/rmark unused
  std::string lmark, rmark, lorg, rorg;
  while (!ifs.eof()) {
    ifs.getline(buffer, sizeof(buffer)-1);
    line_num++;
    if (is_empty_line(buffer)) {
      continue;
    }
    chrsize_t lstart = 0;
    chrsize_t rstart = 0;
    char* lchr = NULL;
    char* rchr = NULL;
    interaction_parse(buffer, lchr, lstart, rchr, rstart);
    // interactions involving chromosomes absent from an axis are skipped
    const AxisChromosome* abs_chr = matrix.getXAxisChromosome(lchr);
    if (!abs_chr) {
      continue;
    }
    const AxisChromosome* ord_chr = matrix.getYAxisChromosome(rchr);
    if (!ord_chr) {
      continue;
    }
    chrsize_t abs_bin = abs_chr->assign_bin(lorg, lstart);
    if (abs_bin == BIN_NOT_FOUND) {
      continue;
    }
    chrsize_t ord_bin = ord_chr->assign_bin(rorg, rstart);
    if (ord_bin == BIN_NOT_FOUND) {
      continue;
    }
    switch(matrix_format) {

    case ASIS_MATRIX:
      // keep the orientation exactly as read
      matrix.add(abs_bin, ord_bin);
      break;

    case UPPER_MATRIX:
      // fold every interaction above the diagonal
      if (abs_bin < ord_bin) {
	matrix.add(abs_bin, ord_bin);
      } else {
	matrix.add(ord_bin, abs_bin);
      }
      break;

    case LOWER_MATRIX:
      // fold every interaction below the diagonal
      if (abs_bin > ord_bin) {
	matrix.add(abs_bin, ord_bin);
      } else {
	matrix.add(ord_bin, abs_bin);
      }
      break;

    case COMPLETE_MATRIX:
      // mirror each entry to both halves (input holds one half only)
      matrix.add(abs_bin, ord_bin);
      if (abs_bin != ord_bin) {
	matrix.add(ord_bin, abs_bin);
      }
      break;
    }
    line_cnt++;
    if (progress && (line_cnt % 100000) == 0) {
      if (detail_progress) {
	double percent = (double(line_cnt)/line_total)*100;
	std::cerr << "" << percent << "% " << line_cnt << " / " << line_total << std::endl;
      } else {
	std::cerr << line_cnt << std::endl;
      }
    }
  }

  if (progress) {
    std::cerr << "\n==================\n";
    std::cerr << " Dumping bedfiles\n";
    std::cerr << "==================\n\n";
  }

  if (NULL != chr_regions) {
    chr_regions->displayBed(xbedfs, matrix.getXAxisChromosomes());
    if (!whole_genome) {
      chr_regions->displayBed(ybedfs, matrix.getYAxisChromosomes());
    }
  } else {
    matrix.displayXBed(xbedfs);
    if (!whole_genome) {
      matrix.displayYBed(ybedfs);
    }
  }
  matrix.displayMatrix(matfs);
  xbedfs.close();
  // NOTE(review): ybedfs is closed even when it was never opened
  // (whole-genome runs); close() then only sets failbit -- harmless.
  ybedfs.close();
  matfs.close();
  return 0;
}
+
+int main(int argc, char* argv[])
+{
+  chrsize_t step = 1;
+  bool binadjust = false;
+  MatrixFormat matrix_format = ASIS_MATRIX;
+  chrsize_t binsize = 0;
+  const char* ifile = NULL;
+  const char* oprefix = NULL;
+  const char* chrA = NULL;
+  const char* chrB = NULL;
+  const char* chrsize_file = NULL;
+  const char* binfile = NULL;
+  bool whole_genome = true;
+  int binoffset = 1;
+  std::string bed_prefix;
+  Format format = SPARSE_BED_FMT;
+
+  if (int ret = get_options(argc, argv, binsize, binfile, chrsize_file, ifile, oprefix, format, bed_prefix, binadjust, matrix_format, step, whole_genome, binoffset, chrA, chrB)) {
+    if (ret < 0) {
+      return 0;
+    }
+    return ret;
+  }
+
+  if (!binsize && !binfile) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --binsize or --binfile option\n";
+    return usage();
+  }
+
+  if (!chrsize_file) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --chrsizes option\n";
+    return usage();
+  }
+
+  if (!ifile) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --ifile option\n";
+    return usage();
+  }
+
+  if (!oprefix) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --oprefix option\n";
+    return usage();
+  }
+
+  if ((chrA && !chrB) || (!chrA && chrB)) {
+    std::cerr << '\n';
+    std::cerr << prog << ": options --chrA and --chrB must be set simultanously\n";
+    return usage();
+  }
+
+  if (binfile && binsize) {
+    std::cerr << '\n';
+    std::cerr << prog << ": options --binfile and --binsize cannot be set simultanously\n";
+    return usage();
+  }
+
+  return build_matrix(binoffset, binsize, binfile, chrsize_file, ifile, oprefix, format, bed_prefix, binadjust, matrix_format, step, whole_genome, chrA, chrB);
+}
diff --git a/bin/src/cutsite_trimming.cpp b/bin/src/cutsite_trimming.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c7b6608123666e18364f379e3a48de03203102b7
--- /dev/null
+++ b/bin/src/cutsite_trimming.cpp
@@ -0,0 +1,143 @@
+// HiC-Pro
+// Copyright 2015 Institut Curie
+// Author(s): Nicolas Servant
+// Contact: nicolas.servant@curie.fr
+// This software is distributed without any guarantee under the terms of the BSD-3 licence
+// g++ -std=c++0x -o cutsite_trimming cutsite_trimming.cpp
+// Example: ./cutsite_trimming --fastq input.fastq --cutsite AGCTT --out trimmed.fastq
+
+#include <iostream>     // std::cout
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+#include <fstream>
+
+static const char* prog;
+
+// Print the command-line synopsis on stderr and propagate the given
+// exit status (defaults to 1 so callers can `return usage();` on error).
+static int usage(int ret=1)
+{
+    std::cerr << "usage: " << prog << " --fastq FASTQFILE --cutsite CUTSITE --out OUTFILE [--rmuntrim] \n"
+              << "usage: " << prog << " --help\n";
+    return ret;
+}
+
+// Parse command-line options into the output parameters.
+// --cutsite takes a comma-separated list of restriction-site sequences;
+// empty tokens (e.g. from "A,,B" or a trailing comma) are skipped.
+// Returns 0 on success, the usage() status on error; exits for no-arg
+// invocation and for --help.
+static int get_options(int argc, char* argv[], std::string& fastqFile, std::vector<std::string>& cutSites, std::string& output, bool& rmuntrim)
+{
+    prog = argv[0];
+    if (argc == 1){
+        exit(usage());
+    }
+    for (int ac = 1; ac < argc; ++ac) {
+        const char* opt = argv[ac];
+        if (*opt == '-') {
+            if (!strcmp(opt, "--help")) {
+                exit(usage(0));
+            }
+            else if (!strcmp(opt, "--fastq")) {
+                // Bounds check: the original read argv[++ac] unconditionally,
+                // walking past argv when the option value was missing.
+                if (++ac >= argc) {
+                    std::cerr << prog << ": missing value for " << opt << std::endl;
+                    return usage();
+                }
+                fastqFile = std::string(argv[ac]);
+            }
+            else if (!strcmp(opt, "--cutsite")) {
+                if (++ac >= argc) {
+                    std::cerr << prog << ": missing value for " << opt << std::endl;
+                    return usage();
+                }
+                std::string cutSitesSequence(argv[ac]);
+                // Split on ','. Search from 'begin' (the original searched
+                // from begin + 1, skipping one character and mis-splitting
+                // adjacent separators).
+                size_t begin = 0;
+                size_t pos;
+                while ((pos = cutSitesSequence.find(',', begin)) != std::string::npos) {
+                    if (pos > begin) {
+                        cutSites.push_back(cutSitesSequence.substr(begin, pos - begin));
+                    }
+                    begin = pos + 1;
+                }
+                if (begin < cutSitesSequence.size()) {
+                    cutSites.push_back(cutSitesSequence.substr(begin));
+                }
+            }
+            else if (!strcmp(opt, "--out")) {
+                if (++ac >= argc) {
+                    std::cerr << prog << ": missing value for " << opt << std::endl;
+                    return usage();
+                }
+                output = std::string(argv[ac]);
+            }
+            else if (!strcmp(opt, "--rmuntrim")) {
+                rmuntrim = true;
+            }
+            else {
+                // Previously unknown '-' options were silently ignored.
+                std::cerr << prog << ": unknown option " << opt << std::endl;
+                return usage();
+            }
+        }else {
+            std::cerr << prog << ": unknown option " << opt << std::endl;
+            return usage();
+        }
+    }
+    return 0;
+}
+
+// Stream a 4-line-per-record FASTQ file and trim each read at the first
+// (leftmost) occurrence of any of the cut sites. A trimmed read keeps the
+// sequence/quality prefix before the match; an untrimmed read is copied
+// through unless rmuntrim is set, in which case it is dropped.
+// Returns the number of trimmed reads (0 if the input cannot be opened).
+static int trim_fastq(std::string& fastqFile, std::vector<std::string>& cutSites, std::string& outFile, bool& rmuntrim){
+    int trim_count=0;
+    std::string ID;
+    std::ifstream ifs (fastqFile);
+    std::ofstream ofs (outFile);
+
+    if (ifs.is_open()){
+        while (getline(ifs, ID)) {
+            std::string seq;
+            std::string dummy;
+            std::string qual;
+
+            getline(ifs, seq);
+            getline(ifs, dummy);
+            getline(ifs, qual);
+
+            // Leftmost match position across all cut sites; npos when no
+            // site occurs. (npos is the maximum size_t value, so a plain
+            // min-comparison replaces the original find_pos flag, whose
+            // trailing reset was dead code — it was re-declared each loop.)
+            size_t pos = std::string::npos;
+            for (std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){
+                size_t tmp_pos = seq.find(*it);
+                if (tmp_pos < pos) {
+                    pos = tmp_pos;
+                }
+            }
+
+            if (pos != std::string::npos) {
+                trim_count++;
+                ofs << ID << '\n';
+                ofs << seq.substr(0, pos) << '\n';
+                ofs << "+\n";
+                ofs << qual.substr(0, pos) << '\n';
+            } else if (!rmuntrim) {
+                ofs << ID << '\n';
+                ofs << seq << '\n';
+                ofs << "+\n";
+                ofs << qual << '\n';
+            }
+        }
+    }else{
+        std::cerr << "Error : Cannot open file : " << fastqFile;
+    }
+    return trim_count;
+}
+
+// Entry point: parse options, validate them, report the run parameters,
+// then trim the FASTQ file and print the number of trimmed reads.
+int main(int argc, char* argv[])
+{
+    std::string fastqFile;
+    std::vector<std::string> cutSites;
+    std::string outFile;
+    bool rmuntrim = false;
+
+    int ret = get_options(argc, argv, fastqFile, cutSites, outFile, rmuntrim);
+
+    // Validate before printing the banner. The original called exit(ret)
+    // here, which exited with status 0 (success) when options parsed fine
+    // but a required argument was missing; always fail with non-zero.
+    if (ret != 0 || fastqFile.empty() || cutSites.size() == 0 || outFile.empty()){
+        usage();
+        return ret != 0 ? ret : 1;
+    }
+
+    printf("##Fastq file: %s\n", fastqFile.c_str());
+    printf("##Restriction sites:\n");
+    for(std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){
+        std::cout << *it << std::endl;
+    }
+    printf("##Output File: %s\n", outFile.c_str());
+
+    int trim_count=trim_fastq(fastqFile, cutSites, outFile, rmuntrim);
+    printf("\n##Trimmed reads: %d\n", trim_count);
+    return(0);
+}
+
+
diff --git a/conf/base.config b/conf/base.config
index 74a33d164184236fcd78197fc390efb6725a69b1..2558cb1be3b55fa76fac6e98671f945093579d44 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -10,10 +10,9 @@
 
 process {
 
-    // TODO nf-core: Check the defaults for all processes
     cpus   = { check_max( 1    * task.attempt, 'cpus'   ) }
-    memory = { check_max( 6.GB * task.attempt, 'memory' ) }
-    time   = { check_max( 4.h  * task.attempt, 'time'   ) }
+    memory = { check_max( 8.GB * task.attempt, 'memory' ) }
+    time   = { check_max( 12.h  * task.attempt, 'time'   ) }
 
     errorStrategy = { task.exitStatus in ((130..145) + 104) ? 'retry' : 'finish' }
     maxRetries    = 1
@@ -24,7 +23,6 @@ process {
     //        These labels are used and recognised by default in DSL2 files hosted on nf-core/modules.
     //        If possible, it would be nice to keep the same label naming convention when
     //        adding in your local modules too.
-    // TODO nf-core: Customise requirements for specific processes.
     // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
     withLabel:process_single {
         cpus   = { check_max( 1                  , 'cpus'    ) }
@@ -33,24 +31,24 @@ process {
     }
     withLabel:process_low {
         cpus   = { check_max( 2     * task.attempt, 'cpus'    ) }
-        memory = { check_max( 12.GB * task.attempt, 'memory'  ) }
+        memory = { check_max( 4.GB  * task.attempt, 'memory'  ) }
         time   = { check_max( 4.h   * task.attempt, 'time'    ) }
     }
     withLabel:process_medium {
         cpus   = { check_max( 6     * task.attempt, 'cpus'    ) }
-        memory = { check_max( 36.GB * task.attempt, 'memory'  ) }
+        memory = { check_max( 8.GB  * task.attempt, 'memory'  ) }
         time   = { check_max( 8.h   * task.attempt, 'time'    ) }
     }
     withLabel:process_high {
         cpus   = { check_max( 12    * task.attempt, 'cpus'    ) }
-        memory = { check_max( 72.GB * task.attempt, 'memory'  ) }
+        memory = { check_max( 64.GB * task.attempt, 'memory'  ) }
         time   = { check_max( 16.h  * task.attempt, 'time'    ) }
     }
     withLabel:process_long {
         time   = { check_max( 20.h  * task.attempt, 'time'    ) }
     }
     withLabel:process_high_memory {
-        memory = { check_max( 200.GB * task.attempt, 'memory' ) }
+        memory = { check_max( 24.GB * task.attempt, 'memory' ) }
     }
     withLabel:error_ignore {
         errorStrategy = 'ignore'
diff --git a/conf/igenomes.config b/conf/igenomes.config
index 3f1143775951af7fd2177938efd9445fa3db2f67..f4c32e3ac16734e10fe6da91f4467b0cbb39198e 100644
--- a/conf/igenomes.config
+++ b/conf/igenomes.config
@@ -13,28 +13,11 @@ params {
     genomes {
         'GRCh37' {
             fasta       = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt"
-            mito_name   = "MT"
-            macs_gsize  = "2.7e9"
-            blacklist   = "${projectDir}/assets/blacklists/GRCh37-blacklist.bed"
         }
         'GRCh38' {
             fasta       = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
-            macs_gsize  = "2.7e9"
-            blacklist   = "${projectDir}/assets/blacklists/hg38-blacklist.bed"
         }
         'CHM13' {
             fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/CHM13/Sequence/WholeGenomeFasta/genome.fa"
@@ -46,395 +29,143 @@ params {
         }
         'GRCm38' {
             fasta       = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt"
-            mito_name   = "MT"
-            macs_gsize  = "1.87e9"
-            blacklist   = "${projectDir}/assets/blacklists/GRCm38-blacklist.bed"
         }
         'TAIR10' {
             fasta       = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/README.txt"
-            mito_name   = "Mt"
         }
         'EB2' {
             fasta       = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/README.txt"
         }
         'UMD3.1' {
             fasta       = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/README.txt"
-            mito_name   = "MT"
         }
         'WBcel235' {
             fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.bed"
-            mito_name   = "MtDNA"
-            macs_gsize  = "9e7"
         }
         'CanFam3.1' {
             fasta       = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/README.txt"
-            mito_name   = "MT"
         }
         'GRCz10' {
             fasta       = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.bed"
-            mito_name   = "MT"
         }
         'BDGP6' {
             fasta       = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.bed"
-            mito_name   = "M"
-            macs_gsize  = "1.2e8"
         }
         'EquCab2' {
             fasta       = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/README.txt"
-            mito_name   = "MT"
         }
         'EB1' {
             fasta       = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/README.txt"
         }
         'Galgal4' {
             fasta       = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.bed"
-            mito_name   = "MT"
         }
         'Gm01' {
             fasta       = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/README.txt"
         }
         'Mmul_1' {
             fasta       = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/README.txt"
-            mito_name   = "MT"
         }
         'IRGSP-1.0' {
             fasta       = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.bed"
-            mito_name   = "Mt"
         }
         'CHIMP2.1.4' {
             fasta       = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/README.txt"
-            mito_name   = "MT"
-        }
-        'Rnor_5.0' {
-            fasta       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BWAIndex/version0.6.0/"
-            bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_5.0/Annotation/Genes/genes.bed"
-            mito_name   = "MT"
         }
         'Rnor_6.0' {
             fasta       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.bed"
-            mito_name   = "MT"
         }
         'R64-1-1' {
             fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.bed"
-            mito_name   = "MT"
-            macs_gsize  = "1.2e7"
         }
         'EF2' {
             fasta       = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/README.txt"
-            mito_name   = "MT"
-            macs_gsize  = "1.21e7"
         }
         'Sbi1' {
             fasta       = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/README.txt"
         }
         'Sscrofa10.2' {
             fasta       = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/README.txt"
-            mito_name   = "MT"
         }
         'AGPv3' {
             fasta       = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.bed"
-            mito_name   = "Mt"
         }
         'hg38' {
             fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
-            macs_gsize  = "2.7e9"
-            blacklist   = "${projectDir}/assets/blacklists/hg38-blacklist.bed"
         }
         'hg19' {
             fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt"
-            mito_name   = "chrM"
-            macs_gsize  = "2.7e9"
-            blacklist   = "${projectDir}/assets/blacklists/hg19-blacklist.bed"
         }
         'mm10' {
             fasta       = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt"
-            mito_name   = "chrM"
-            macs_gsize  = "1.87e9"
-            blacklist   = "${projectDir}/assets/blacklists/mm10-blacklist.bed"
         }
         'bosTau8' {
             fasta       = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
         }
         'ce10' {
             fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/README.txt"
-            mito_name   = "chrM"
-            macs_gsize  = "9e7"
         }
         'canFam3' {
             fasta       = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/README.txt"
-            mito_name   = "chrM"
         }
         'danRer10' {
             fasta       = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
-            macs_gsize  = "1.37e9"
         }
         'dm6' {
             fasta       = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
-            macs_gsize  = "1.2e8"
         }
         'equCab2' {
             fasta       = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/README.txt"
-            mito_name   = "chrM"
         }
         'galGal4' {
             fasta       = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/README.txt"
-            mito_name   = "chrM"
         }
         'panTro4' {
             fasta       = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/README.txt"
-            mito_name   = "chrM"
         }
         'rn6' {
             fasta       = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.bed"
-            mito_name   = "chrM"
         }
         'sacCer3' {
             fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BismarkIndex/"
-            readme      = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Annotation/README.txt"
-            mito_name   = "chrM"
-            macs_gsize  = "1.2e7"
         }
         'susScr3' {
             fasta       = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa"
-            bwa         = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BWAIndex/version0.6.0/"
             bowtie2     = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/"
-            star        = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/STARIndex/"
-            bismark     = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BismarkIndex/"
-            gtf         = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.gtf"
-            bed12       = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.bed"
-            readme      = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/README.txt"
-            mito_name   = "chrM"
         }
     }
 }
diff --git a/conf/modules.config b/conf/modules.config
index da58a5d8817bdf25ac855f588c4fc67d840ad835..096a86006168e216e1f68863f65e4dd2d5d96c7d 100644
--- a/conf/modules.config
+++ b/conf/modules.config
@@ -1,41 +1,289 @@
-/*
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    Config file for defining DSL2 per module options and publishing paths
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-    Available keys to override module options:
-        ext.args   = Additional arguments appended to command in module.
-        ext.args2  = Second set of arguments appended to command in module (multi-tool modules).
-        ext.args3  = Third set of arguments appended to command in module (multi-tool modules).
-        ext.prefix = File name prefix for output files.
-----------------------------------------------------------------------------------------
-*/
-
 process {
 
+    //Default
     publishDir = [
         path: { "${params.outdir}/${task.process.tokenize(':')[-1].tokenize('_')[0].toLowerCase()}" },
-        mode: params.publish_dir_mode,
+        mode: 'copy',
         saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
     ]
 
-    withName: SAMPLESHEET_CHECK {
+    withName: 'CUSTOM_DUMPSOFTWAREVERSIONS' {
         publishDir = [
             path: { "${params.outdir}/pipeline_info" },
-            mode: params.publish_dir_mode,
-            saveAs: { filename -> filename.equals('versions.yml') ? null : filename }
+            mode: 'copy',
+            pattern: '*_versions.yml'
         ]
     }
 
-    withName: FASTQC {
-        ext.args = '--quiet'
+    //**********************************************
+    // PREPARE_GENOME
+    withName: 'BOWTIE2_BUILD' {
+        publishDir = [
+            path: { "${params.outdir}/genome/bowtie2" },
+            mode: 'copy',
+            enabled: params.save_reference
+        ]
     }
 
-    withName: CUSTOM_DUMPSOFTWAREVERSIONS {
+    withName: 'CUSTOM_GETCHROMSIZES' {
         publishDir = [
-            path: { "${params.outdir}/pipeline_info" },
-            mode: params.publish_dir_mode,
-            pattern: '*_versions.yml'
+            path: { "${params.outdir}/genome" },
+            mode: 'copy',
+            enabled: params.save_reference
+        ]
+    }
+
+    withName: 'GET_RESTRICTION_FRAGMENTS' {
+        publishDir = [
+            path: { "${params.outdir}/genome" },
+            mode: 'copy',
+            enabled: params.save_reference
+        ]
+    }
+
+    //*******************************************
+    // HICPRO
+    withName: 'BOWTIE2_ALIGN' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/mapping" },
+            mode: 'copy',
+            enabled: params.save_aligned_intermediates
+        ]
+        ext.prefix = { "${meta.id}_${meta.chunk}_${meta.mates}" }
+        ext.args = params.bwt2_opts_end2end ?: ''
+        ext.args2 = !params.dnase ? "-F 4" :""
+    }
+
+    withName: 'TRIM_READS' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/mapping/" },
+            mode: 'copy',
+            enabled: params.save_aligned_intermediates
+        ]
+    }
+
+    withName: 'BOWTIE2_ALIGN_TRIMMED' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/mapping" },
+            mode: 'copy',
+            enabled: params.save_aligned_intermediates
+        ]
+        ext.prefix = { "${meta.id}_${meta.chunk}_${meta.mates}_trimmed" }
+        ext.args = params.bwt2_opts_trimmed ?: ''
+        ext.args2 = ""
+    }
+
+    withName: 'MERGE_BOWTIE2' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/mapping" },
+            mode: 'copy',
+            enabled: params.save_aligned_intermediates
         ]
+        ext.prefix = { "${meta.id}_${meta.chunk}_${meta.mates}" }
     }
 
+    withName: 'COMBINE_MATES' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/mapping" },
+            mode: 'copy',
+            pattern: '*.bam'
+        ]
+        ext.args = [
+            "-t",
+            params.keep_multi ? "--multi" : "",
+            params.min_mapq ? "-q ${params.min_mapq}" : ""
+        ].join(' ').trim()
+        ext.prefix = { "${meta.id}_${meta.chunk}" }
+    }
+
+    withName: 'GET_VALID_INTERACTION' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/valid_pairs" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename},
+            mode: 'copy',
+            enabled: params.save_pairs_intermediates
+        ]
+        ext.args = { [
+            params.min_cis_dist > 0 ? " -d ${params.min_cis_dist}" : '',
+            params.min_insert_size > 0 ?  " -s ${params.min_insert_size}" : '',
+            params.max_insert_size > 0 ? " -l ${params.max_insert_size}" : '',
+            params.min_restriction_fragment_size > 0 ? " -t ${params.min_restriction_fragment_size}" : '',
+            params.max_restriction_fragment_size > 0 ? " -m ${params.max_restriction_fragment_size}" : '',
+            params.save_interaction_bam ? " --sam" : ''
+        ].join(' ').trim() }
+    }
+
+    withName: 'GET_VALID_INTERACTION_DNASE' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/valid_pairs" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy',
+            enabled: params.save_pairs_intermediates
+        ]
+        ext.args = { params.min_cis_dist > 0 ? " -d ${params.min_cis_dist}" : "" }
+    }
+
+    withName: 'MERGE_VALID_INTERACTION' {
+        publishDir = [
+            [
+                path: { "${params.outdir}/hicpro/stats/${meta.id}" },
+                mode: 'copy',
+                pattern: "*stat"
+            ],
+            [
+                path: { "${params.outdir}/hicpro/valid_pairs" },
+                mode: 'copy',
+                saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+                pattern: "*Pairs"
+            ]
+        ]
+        ext.args = { params.keep_dups ? '' : '-d' }
+    }
+
+    withName: 'MERGE_STATS' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/stats/${meta.id}" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy',
+            pattern: "*stat"
+        ]
+    }
+
+    withName: 'HICPRO2PAIRS' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/valid_pairs/pairix/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+    }
+
+    withName: 'BUILD_CONTACT_MAPS' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/matrix/raw" },
+            mode: 'copy',
+            enabled: params.hicpro_maps
+        ]
+        ext.prefix = { "${meta.id}.${resolution}" }
+    }
+
+    withName: 'ICE_NORMALIZATION' {
+        publishDir = [
+            path: { "${params.outdir}/hicpro/matrix/iced" },
+            mode: 'copy',
+            enabled: params.hicpro_maps
+        ]
+    }
+
+    //*****************************************
+    // QUALITY METRICS
+
+    withName: 'HIC_PLOT_DIST_VS_COUNTS'{
+        publishDir = [
+            path: { "${params.outdir}/distance_decay/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+    }
+
+    //*****************************************
+    // COOLER
+
+    withName: 'COOLER_MAKEBINS' {
+        publishDir = [
+            path: { "${params.outdir}/contact_maps/bins/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.prefix={ "cooler_bins_${cool_bin}" }
+    }
+
+    withName: 'COOLER_CLOAD' {
+        publishDir = [
+            path: { "${params.outdir}/contact_maps/cool/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy',
+            enabled : params.save_raw_maps
+        ]
+        ext.prefix = { "${meta.id}.${cool_bin}" }
+        ext.args = "pairs -c1 2 -p1 3 -c2 4 -p2 5"
+    }
+
+    withName: 'COOLER_BALANCE' {
+        publishDir = [
+            path: { "${params.outdir}/contact_maps/cool/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.args = '--force'
+        ext.prefix = { "${cool.baseName}_balanced" }
+    }
+
+    withName: 'COOLER_DUMP' {
+        publishDir = [
+            enabled: false
+        ]
+        ext.prefix = { "${cool.baseName}" }
+        ext.args = "--one-based-ids --balanced --na-rep 0"
+    }
+
+    withName:'SPLIT_COOLER_DUMP' {
+        publishDir = [
+            [
+                path: { "${params.outdir}/contact_maps/txt/" },
+                mode: 'copy',
+                pattern: "*_raw.txt",
+                enabled: params.save_raw_maps
+            ],
+            [
+                path: { "${params.outdir}/contact_maps/txt/" },
+                mode: 'copy',
+                pattern: "*_balanced.txt"
+            ]
+        ]
+    }
+
+    withName: 'COOLER_ZOOMIFY' {
+        publishDir = [
+            path: { "${params.outdir}/contact_maps/cool/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.args = "--balance"
+    }
+
+    //********************************
+    // COMPARTMENTS
+
+    withName: 'COOLTOOLS_EIGSCIS' {
+        publishDir = [
+            path: { "${params.outdir}/compartments/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.args = '--bigwig'
+        ext.prefix = { "${meta.id}.${resolution}" }
+    }
+
+    //********************************
+    // TADS
+
+    withName: 'COOLTOOLS_INSULATION' {
+        publishDir = [
+            path: { "${params.outdir}/tads/insulation/" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.args = '15 25 50 --window-pixels'
+        ext.prefix = { "${cool.baseName}" }
+    }
+
+    withName: 'HIC_FIND_TADS' {
+        publishDir = [
+            path: { "${params.outdir}/tads/hicExplorer" },
+            saveAs: { filename -> filename.equals('versions.yml') ? null : filename },
+            mode: 'copy'
+        ]
+        ext.args = '--correctForMultipleTesting fdr'
+        ext.prefix = { "${cool.baseName}" }
+    }
 }
diff --git a/conf/public_aws_ecr.config b/conf/public_aws_ecr.config
new file mode 100644
index 0000000000000000000000000000000000000000..4979017e760018adb5923656635506aca6540012
--- /dev/null
+++ b/conf/public_aws_ecr.config
@@ -0,0 +1,57 @@
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    AWS ECR Config
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Config to set public AWS ECR images wherever possible
+    This improves speed when running on AWS infrastructure.
+    Use this as an example template when using your own private registry.
+----------------------------------------------------------------------------------------
+*/
+
+docker.registry = 'public.ecr.aws'
+podman.registry = 'public.ecr.aws'
+
+process {
+    withName: '.*:BOWTIE2_ALIGN' {
+        container = 'quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0'
+    }
+    withName: '.*:BOWTIE2_ALIGN_TRIMMED' {
+        container = 'quay.io/biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0'
+    }
+    withName: '.*:BUILD_CONTACT_MAPS' {
+        container = 'quay.io/nf-core/ubuntu:20.04'
+    }
+    withName: '.*:COMBINE_MATES' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:COOLTOOLS_EIGSCIS' {
+        container = 'quay.io/biocontainers/mulled-v2-c81d8d6b6acf4714ffaae1a274527a41958443f6:cc7ea58b8cefc76bed985dcfe261cb276ed9e0cf-0'
+    }
+    withName: '.*:GET_RESTRICTION_FRAGMENTS' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:GET_VALID_INTERACTION' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:GET_VALID_INTERACTION_DNASE' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:ICE_NORMALIZATION' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:MERGE_STATS' {
+        container = 'quay.io/biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'
+    }
+    withName: '.*:MERGE_VALID_INTERACTION' {
+        container = 'quay.io/nf-core/ubuntu:20.04'
+    }
+    withName: '.*:SAMPLESHEET_CHECK' {
+        container = 'quay.io/biocontainers/python:3.8.3'
+    }
+    withName: '.*:SPLIT_COOLER_DUMP' {
+        container = 'quay.io/nf-core/ubuntu:20.04'
+    }
+    withName: '.*:TRIM_READS' {
+        container = 'quay.io/nf-core/ubuntu:20.04'
+    }
+}
diff --git a/conf/test.config b/conf/test.config
index f7064d922f5a15fa4993f6f292c1dc332cc45c01..1501b027bf9a0af965f021c6d85604de690834ff 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -11,19 +11,32 @@
 */
 
 params {
-    config_profile_name        = 'Test profile'
+    config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
     config_profile_description = 'Minimal test dataset to check pipeline function'
 
-    // Limit resources so that this can run on GitHub Actions
-    max_cpus   = 2
-    max_memory = '6.GB'
-    max_time   = '6.h'
+    // Limit resources so that this can run on Travis
+    max_cpus = 2
+    max_memory = 4.GB
+    max_time = 1.h
 
     // Input data
-    // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets
-    // TODO nf-core: Give any required params for the test so that command line flags are not needed
-    input  = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_test_illumina_amplicon.csv'
+    input = "${baseDir}/assets/samplesheet.csv"
 
-    // Genome references
-    genome = 'R64-1-1'
+    // Annotations
+    fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
+    digestion = 'hindiii'
+    min_mapq = 10
+    min_restriction_fragment_size = 100
+    max_restriction_fragment_size = 100000
+    min_insert_size = 100
+    max_insert_size = 600
+
+    bin_size = '2000,1000'
+    res_dist_decay = '1000'
+    res_tads = '1000'
+    tads_caller = 'insulation,hicexplorer'
+    res_compartments = '2000'
+
+    // Ignore `--input` as otherwise the parameter validation will throw an error
+    schema_ignore_params = 'genomes,digest,input_paths,input'
 }
diff --git a/conf/test_full.config b/conf/test_full.config
index 4c22dc27b8ca7583a09a997f01d0d833c43ee828..0a3a68ff20677cea809b7d9f38dbe9976e885395 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -6,7 +6,6 @@
 
     Use as follows:
         nextflow run nf-core/hic -profile test_full,<docker/singularity> --outdir <OUTDIR>
-
 ----------------------------------------------------------------------------------------
 */
 
@@ -17,10 +16,14 @@ params {
     config_profile_description = 'Full test dataset to check pipeline function'
 
     // Input data for full size test
-    // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA)
-    // TODO nf-core: Give any required params for the test so that command line flags are not needed
-    input = 'https://raw.githubusercontent.com/nf-core/test-datasets/viralrecon/samplesheet/samplesheet_full_illumina_amplicon.csv'
+    input = 'https://raw.githubusercontent.com/nf-core/test-datasets/hic/samplesheet/samplesheet_HiC_mESC_full_test.csv'
 
     // Genome references
-    genome = 'R64-1-1'
+    genome = 'mm10'
+
+    // Other options
+    digestion = 'dpnii'
+    bin_size = '40000,250000,500000,1000000'
+    res_compartments = '500000,250000'
+    res_tads = '40000,20000'
 }
diff --git a/docs/images/nfcore-hic_logo.png b/docs/images/nfcore-hic_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..d75e44b92cc77b61d1cc79747cfa390101784a03
Binary files /dev/null and b/docs/images/nfcore-hic_logo.png differ
diff --git a/docs/images/nfcore-hic_logo.svg b/docs/images/nfcore-hic_logo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..7a2086987e69e1529baf40fe3ea526c174b85ac1
--- /dev/null
+++ b/docs/images/nfcore-hic_logo.svg
@@ -0,0 +1,205 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   enable-background="new 0 0 1150.9 517"
+   version="1.1"
+   viewBox="0 0 1456.7841 522.44342"
+   xml:space="preserve"
+   id="svg2"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="EmptyName_logo.svg"
+   width="1456.7842"
+   height="522.44342"><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1920"
+     inkscape:window-height="1015"
+     id="namedview75"
+     showgrid="false"
+     inkscape:zoom="0.35757767"
+     inkscape:cx="253.20897"
+     inkscape:cy="13.773735"
+     inkscape:window-x="1920"
+     inkscape:window-y="724"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer3"
+     fit-margin-left="62.25"
+     fit-margin-right="62.25"
+     fit-margin-top="62.25"
+     fit-margin-bottom="62.25" /><metadata
+     id="metadata4"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       id="e"><path
+         d="m 280.17,136.33 -21.5,-21.584 61,0 0,21.584 -39.5,0 z"
+         id="path9"
+         inkscape:connector-curvature="0" /></clipPath><linearGradient
+       id="f"
+       x2="1"
+       gradientTransform="matrix(37.935819,29.638391,-29.638391,37.935819,295.72019,166.19562)"
+       gradientUnits="userSpaceOnUse"><stop
+         stop-color="#0c542a"
+         offset="0"
+         id="stop12" /><stop
+         stop-color="#0c542a"
+         offset=".21472"
+         id="stop14" /><stop
+         stop-color="#25af64"
+         offset=".57995"
+         id="stop16" /><stop
+         stop-color="#25af64"
+         offset=".84663"
+         id="stop18" /><stop
+         stop-color="#25af64"
+         offset="1"
+         id="stop20" /></linearGradient></defs><style
+     type="text/css"
+     id="style22">
+	.st0{fill:#24AF63;}
+	.st1{font-family:'Maven Pro';}
+	.st1{font-weight:'bold';}
+	.st2{font-size:209.8672px;}
+	.st3{fill:#21AF62;}
+	.st4{fill:#ECDC86;}
+	.st5{fill:#A0918F;}
+	.st6{fill:#3F2B29;}
+	.st7{fill:#396E35;}
+	.st8{fill:url(#d);}
+</style><linearGradient
+     id="d"
+     x1="295.45999"
+     x2="333.34"
+     y1="150.75"
+     y2="180.35001"
+     gradientUnits="userSpaceOnUse"><stop
+       stop-color="#0D552B"
+       offset=".2147"
+       id="stop41" /><stop
+       stop-color="#176837"
+       offset=".311"
+       id="stop43" /><stop
+       stop-color="#1F8448"
+       offset=".4609"
+       id="stop45" /><stop
+       stop-color="#239A56"
+       offset=".604"
+       id="stop47" /><stop
+       stop-color="#24A860"
+       offset=".7361"
+       id="stop49" /><stop
+       stop-color="#25AF64"
+       offset=".8466"
+       id="stop51" /></linearGradient><g
+     inkscape:groupmode="layer"
+     id="layer2"
+     inkscape:label="Icon"
+     style="display:inline"
+     transform="translate(5.3761467,0)"><g
+       id="g4209"><path
+         style="fill:#24af63"
+         inkscape:connector-curvature="0"
+         id="path24"
+         d="m 1084.1,163.75 0,3.6 c -0.1,0 -0.1,0 -0.2,0.1 l -1.8,-1.5 c -4,-3.4 -8.3,-6.4 -13.1,-8.8 -0.8,-0.4 -1.6,-0.9 -2.5,-1.1 -0.1,-0.1 -0.2,-0.2 -0.3,-0.2 -1.8,-0.7 -3.6,-1.3 -5.5,-1.8 -4.1,-0.9 -8.2,-1 -12.3,-0.2 -5.3,1.1 -10,3.4 -14.5,6.4 -4.4,3 -8.4,6.5 -12.1,10.2 -0.7,0.7 -0.7,0.7 -1.1,-0.2 -2,-4.1 -4.2,-8.1 -6.9,-11.8 -2.1,-2.8 -4.4,-5.4 -7.4,-7.2 -3,-1.9 -6.3,-2.6 -9.8,-1.7 -4.3,1 -7.8,3.6 -11.1,6.4 -2,1.5 -3.8,3.3 -5.6,5 -1.7,1.5 -3.3,3 -5,4.5 -0.3,0.3 -0.5,0.3 -0.8,0 -1.7,-1.8 -3.5,-3.4 -5.6,-4.5 -3.1,-1.7 -6.3,-1.9 -9.6,-0.8 -2.8,0.9 -5.2,2.4 -7.7,4 -1,0.6 -1.9,1.3 -2.9,1.8 l 0,-0.2 c 0.1,-0.2 0.1,-0.4 0.1,-0.6 0.2,-4.4 0.5,-8.9 1.2,-13.3 1,-6.1 2.5,-12 5.2,-17.5 2,-4.1 4.7,-7.9 8.1,-11 4.5,-4.1 9.8,-6.7 15.6,-8.3 6.3,-1.8 12.7,-2.6 19.2,-2.9 2.6,-0.1 5.1,-0.2 7.7,-0.3 1.3,0.5 2.6,0.8 3.9,1.2 1.9,0.6 3.8,1.2 5.7,1.7 1,0.4 1.9,0.7 2.9,1.1 3.7,1.3 7.3,3 10.4,5.5 0.8,0.6 1.6,1.3 2.4,2 -0.2,-0.6 -0.4,-1.1 -0.6,-1.7 -1.4,-3.7 -3.5,-6.7 -6.9,-8.8 -1.4,-0.9 -2.9,-1.5 -4.4,-2.3 0.1,0 0.3,0 0.4,-0.1 4.5,-0.8 9.1,-1.2 13.7,-1.4 3.9,-0.2 7.9,-0.1 11.8,0.3 4.6,0.5 9.1,1.4 13.4,3 6.4,2.4 11.9,6.1 16.2,11.5 3.7,4.7 6.1,10.1 7.6,15.9 1.5,5.7 2.1,11.6 2.3,17.5 -0.1,2.1 -0.1,4.3 -0.1,6.5 z"
+         class="st0" /><path
+         style="fill:#ecdc86"
+         inkscape:connector-curvature="0"
+         id="path26"
+         d="m 1084.1,157.15 0.1,0 0,6.6 -0.1,0 0,-6.6 z"
+         class="st4" /><path
+         style="fill:#a0918f"
+         inkscape:connector-curvature="0"
+         id="path28"
+         d="m 1047.6,62.25 0,0.1 -4.5,0 0,-0.1 4.5,0 z"
+         class="st5" /><path
+         style="fill:#24af63"
+         inkscape:connector-curvature="0"
+         id="path30"
+         d="m 1050.5,250.65 c 2.5,-1 4.9,-2.3 7.3,-3.6 2.8,-1.7 5.4,-3.5 8,-5.4 2.2,-1.6 4.3,-3.3 6.4,-5.1 l 3.6,-3 c 0.2,-0.2 0.2,-0.1 0.3,0.1 0.4,1.6 0.7,3.3 1.1,5 0.5,2.3 0.8,4.6 1.1,6.9 0.3,2.7 0.4,5.3 0.2,8 -0.2,3.3 -0.8,6.6 -2,9.7 -0.7,1.9 -1.6,3.7 -2.7,5.4 -1.4,2.2 -3,4.2 -5,5.9 -2.3,2.1 -4.9,3.9 -7.7,5.4 -3.7,2.1 -7.7,3.6 -11.8,4.8 -3.9,1.2 -7.9,2 -11.9,2.7 -1.1,0.2 -2.2,0.4 -3.3,0.4 -2.3,-0.1 -4.6,-0.6 -6.8,-1.4 -3.3,-1.3 -6.2,-3.3 -9.5,-4.8 -1.8,-0.8 -3.6,-1.4 -5.5,-1.5 -2.5,-0.2 -4.6,0.7 -6.4,2.4 l -3.9,3.9 c -2.2,2.2 -4.8,3.7 -7.9,4.2 -2.1,0.3 -4.1,0.2 -6.2,-0.1 -2.9,-0.4 -5.7,-1.1 -8.4,-1.9 -4,-1.3 -7.7,-3.1 -11.1,-5.7 -3.2,-2.4 -5.7,-5.4 -7.8,-8.8 -2.1,-3.5 -3.3,-7.2 -4.2,-11.1 -0.4,-1.7 -0.6,-3.5 -0.8,-5.2 -0.3,-2.5 -0.4,-4.9 -0.3,-7.4 0.1,-3.5 0.4,-6.9 0.9,-10.4 0.4,0.4 0.8,0.7 1.1,1 2.2,2 4.7,3.8 7.3,5.4 2.9,1.7 6.1,3.1 9.4,4 2.2,0.6 4.5,1 6.8,1.1 1.9,0.2 3.8,0.2 5.7,0.1 2.2,-0.1 4.5,-0.3 6.7,-0.9 0.3,0 0.6,0 0.8,-0.1 2,-0.4 4,-0.9 6,-1.5 2.3,-0.7 4.5,-1.4 6.7,-2.2 2.1,-0.8 4.3,-1.7 6.4,-2.6 0.6,-0.3 1,-0.2 1.5,0.2 3.5,2.7 7.3,5 11.4,6.7 4.6,1.8 9.3,2.7 14.2,2.3 3.6,-0.7 7,-1.6 10.3,-2.9 z"
+         class="st0" /><path
+         style="fill:#ecdc86"
+         inkscape:connector-curvature="0"
+         id="path32"
+         d="m 1050.5,250.65 c -3.3,1.3 -6.7,2.2 -10.2,2.5 -4.9,0.4 -9.6,-0.5 -14.2,-2.3 -4.1,-1.6 -7.9,-3.9 -11.4,-6.7 -0.5,-0.4 -0.9,-0.5 -1.5,-0.2 -2.1,0.9 -4.2,1.8 -6.4,2.6 -2.2,0.8 -4.4,1.6 -6.7,2.2 -2,0.6 -4,1 -6,1.5 -0.3,0.1 -0.6,0.1 -0.8,0.1 0.7,-0.8 1.4,-1.6 2.1,-2.4 2.8,-3.2 4.8,-6.9 5.9,-11.1 1.6,-5.6 3.2,-11.3 4.6,-17 1,-4.2 1.8,-8.4 2.4,-12.7 0.4,-3.1 1,-14.9 0.8,-17.7 -0.5,-8.6 -2.4,-16.8 -5.9,-24.7 -2.1,-4.7 -5.7,-7.9 -10.7,-9.2 -2.2,-0.6 -4.4,-0.4 -6.5,0.3 -0.2,0.1 -0.3,0.2 -0.5,0.1 3.3,-2.8 6.7,-5.4 11.1,-6.4 3.5,-0.8 6.8,-0.2 9.8,1.7 3,1.9 5.3,4.4 7.4,7.2 2.7,3.7 4.9,7.7 6.9,11.8 0.4,0.9 0.4,0.9 1.1,0.2 3.7,-3.8 7.7,-7.3 12.1,-10.2 4.4,-3 9.2,-5.3 14.5,-6.4 4.1,-0.8 8.2,-0.7 12.3,0.2 1.9,0.4 3.7,1 5.5,1.8 0.1,0.1 0.3,0.1 0.3,0.2 -5.3,0.1 -9.8,2.1 -13.9,5.2 -2,1.5 -3.8,3.2 -5.2,5.3 -1.1,1.7 -2.1,3.6 -2.9,5.5 -1.8,3.8 -3.3,7.8 -4.4,11.9 -0.9,3.5 -1.5,7.1 -1.8,10.7 -0.2,2.8 -0.3,5.6 -0.2,8.4 0.1,3.4 0.5,6.8 0.9,10.3 0.7,5.7 1.7,11.4 2.7,17.1 0.5,3.1 0.9,6.3 1.5,9.5 0.7,4.6 3.3,8 7,10.6 0,-0.1 0.2,0 0.3,0.1 z"
+         class="st4" /><path
+         style="fill:#3f2b29"
+         inkscape:connector-curvature="0"
+         id="path34"
+         d="m 1043.1,62.35 4.5,0 c 3.6,0.2 7.2,0.8 10.6,2 2.7,0.9 3.3,2.7 1.7,5 -1.1,1.6 -2.7,2.8 -4.4,3.9 -2.1,1.4 -4.4,2.6 -6.9,3.5 -2.5,1 -4.9,0 -6.5,-2.5 -0.5,-0.8 -0.9,-1.6 -1.1,-2.5 -0.1,-0.3 -0.2,-0.4 -0.5,-0.4 -5.6,-1 -10.6,0.3 -14.7,4.3 -3.4,3.2 -5.4,7.3 -6.8,11.7 -1.3,4 -1.9,8 -2.1,12.2 -0.2,3.7 0.1,7.4 0.6,11 0.1,0.6 0.3,1.2 0.3,1.9 0.1,0.8 -0.2,1.5 -0.8,1.9 -0.7,0.5 -1.5,0.4 -2.3,0.4 -1.9,-0.6 -3.8,-1.2 -5.7,-1.7 l 0,-1.3 c 0,-2 0,-3.9 0.1,-5.9 0.4,-7.7 1.6,-15.3 4.6,-22.5 2.2,-5.4 5.4,-10.1 9.9,-13.8 3.7,-3.1 7.9,-5.1 12.6,-6.2 2.4,-0.6 4.6,-0.9 6.9,-1 z"
+         class="st6" /><path
+         style="fill:#396e35"
+         inkscape:connector-curvature="0"
+         id="path36"
+         d="m 1014.8,114.65 c 0.8,0 1.6,0.1 2.3,-0.4 0.7,-0.5 0.9,-1.2 0.8,-1.9 -0.1,-0.6 -0.2,-1.3 -0.3,-1.9 0.4,0 0.7,-0.1 1.1,-0.1 1.4,0.8 2.9,1.5 4.4,2.3 3.4,2.1 5.5,5.1 6.9,8.8 0.2,0.6 0.4,1.1 0.6,1.7 -0.8,-0.7 -1.6,-1.4 -2.4,-2 -3.2,-2.4 -6.7,-4.1 -10.4,-5.5 -1.1,-0.3 -2,-0.6 -3,-1 z"
+         class="st7" /><path
+         style="fill:#396e35"
+         inkscape:connector-curvature="0"
+         id="path38"
+         d="m 1009.1,111.65 0,1.3 c -1.3,-0.4 -2.6,-0.7 -3.9,-1.2 1.4,-0.1 2.7,-0.1 3.9,-0.1 z"
+         class="st7" /></g></g><g
+     inkscape:groupmode="layer"
+     id="layer3"
+     inkscape:label="Text"
+     style="display:inline"
+     transform="translate(5.3761467,0)"><text
+       x="48.898899"
+       y="241.24541"
+       font-size="209.87px"
+       font-weight="bold"
+       id="text53"
+       style="font-weight:bold;font-size:209.86999512px;font-family:'Maven Pro'"><tspan
+         class="st0 st1 st2"
+         x="48.898899"
+         y="241.24541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan55"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro';fill:#24af63">nf-<tspan
+   id="tspan57"
+   style="fill:#000000" /></tspan></text>
+<text
+       x="357.14139"
+       y="241.24541"
+       font-size="209.87px"
+       font-weight="bold"
+       id="text69"
+       style="font-weight:bold;font-size:209.86999512px;font-family:'Maven Pro'"><tspan
+         class="st0 st1 st2"
+         x="357.14139"
+         y="241.24541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan71"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro';fill:#24af63"><tspan
+           id="tspan73"
+           style="fill:#000000">core/</tspan></tspan></text>
+<text
+       x="-260.05042"
+       y="457.04541"
+       font-weight="bold"
+       id="text59"
+       style="font-weight:bold;font-family:'Maven Pro'"><tspan
+         class="st1 st2"
+         x="47.849564"
+         y="457.04541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan61"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro'">hic</tspan></text>
+<path
+       d="m 300.43725,166.1155 -21.53224,21.61638 61.0915,0 0,-21.61638 -39.55926,0 z"
+       id="path67"
+       inkscape:connector-curvature="0"
+       style="fill:url(#f)" /></g></svg>
\ No newline at end of file
diff --git a/docs/output.md b/docs/output.md
index 0a1b0368b9e6dc37194a6524119f7b761facd612..1086b0371c3be007b813fdd582e5e79dde1000f6 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -3,39 +3,273 @@
 ## Introduction
 
 This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline.
-
 The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory.
 
-<!-- TODO nf-core: Write this documentation describing your workflow's output -->
-
 ## Pipeline overview
 
 The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps:
 
-- [FastQC](#fastqc) - Raw read QC
-- [MultiQC](#multiqc) - Aggregate report describing results and QC from the whole pipeline
-- [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution
+- [From raw data to valid pairs](#from-raw-data-to-valid-pairs)
+  - [HiC-Pro](#hicpro)
+    - [Reads alignment](#reads-alignment)
+    - [Valid pairs detection](#valid-pairs-detection)
+    - [Duplicates removal](#duplicates-removal)
+    - [Contact maps](#hicpro-contact-maps)
+- [Hi-C contact maps](#hic-contact-maps)
+- [Downstream analysis](#downstream-analysis)
+  - [Distance decay](#distance-decay)
+  - [Compartments calling](#compartments-calling)
+  - [TADs calling](#tads-calling)
+- [MultiQC](#multiqc) - aggregate report and quality controls, describing
+  results of the whole pipeline
+- [Export](#export) - additional export for compatibility with downstream
+  analysis tools and visualisation
 
-### FastQC
+## From raw data to valid pairs
 
-<details markdown="1">
-<summary>Output files</summary>
+### HiC-Pro
 
-- `fastqc/`
-  - `*_fastqc.html`: FastQC report containing quality metrics.
-  - `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images.
+The current version is mainly based on the
+[HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
+For details about the workflow, see
+[Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
 
-</details>
+#### Reads alignment
+
+Using Hi-C data, each reads mate has to be independently aligned on the
+reference genome.
+The current workflow implements a two steps mapping strategy. First, the reads
+are aligned using an end-to-end aligner.
+Second, reads spanning the ligation junction are trimmed from their 3' end,
+and aligned back on the genome.
+Aligned reads for both fragment mates are then paired in a single paired-end
+BAM file.
+Singletons and low quality mapped reads are filtered (`--min_mapq`).
+Note that if the `--dnase` mode is activated, HiC-Pro will skip the second
+mapping step.
+
+**Output directory: `results/hicpro/mapping`**
+
+- `*bwt2pairs.bam` - final BAM file with aligned paired data
+
+If `--save_aligned_intermediates` is specified, additional mapping file results
+are available:
+
+- `*.bam` - Aligned reads (R1 and R2) from end-to-end alignment
+- `*_unmap.fastq` - Unmapped reads after end-to-end alignment
+- `*_trimmed.fastq` - Trimmed reads after end-to-end alignment
+- `*_trimmed.bam` - Alignment of trimmed reads
+- `*bwt2merged.bam` - merged BAM file after the two-steps alignment
+- `*.mapstat` - mapping statistics per read mate
+
+Usually, a high fraction of reads is expected to be aligned on the genome
+(80-90%). Among them, we usually observe a few percent (around 10%) of step 2
+aligned reads. Those reads are chimeric fragments for which we detect a
+ligation junction. An abnormal level of chimeric reads can reflect a ligation
+issue during the library preparation.
+The fraction of singleton or low quality reads depends on the genome complexity and
+the fraction of unmapped reads. The fraction of singleton is usually close to
+the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the
+same pair were unmapped.
+
+#### Valid pairs detection with HiC-Pro
+
+Each aligned reads can be assigned to one restriction fragment according to the
+reference genome and the digestion protocol.
+
+Invalid pairs are classified as follows:
+
+- Dangling end, i.e. unligated fragments (both reads mapped on the same
+  restriction fragment)
+- Self circles, i.e. fragments ligated on themselves (both reads mapped on the
+  same restriction fragment in inverted orientation)
+- Religation, i.e. ligation of juxtaposed fragments
+- Filtered pairs, i.e. any pairs that do not match the filtering criteria on
+  inserts size, restriction fragments size
+- Dumped pairs, i.e. any pairs for which we were not able to reconstruct the
+  ligation product.
+
+Only valid pairs involving two different restriction fragments are used to
+build the contact maps.
+Duplicated valid pairs associated with PCR artefacts are discarded
+(see `--keep_dup` to not discard them).
+
+In case of Hi-C protocols that do not require a restriction enzyme such as
+DNase Hi-C or micro Hi-C, the assignment to a restriction fragment is not possible
+(see `--dnase`).
+Short range interactions that are likely to be spurious ligation products
+can thus be discarded using the `--min_cis_dist` parameter.
+
+**Output directory: `results/hicpro/valid_pairs`**
+
+- `*.validPairs` - List of valid ligation products
+- `*.DEpairs` - List of dangling-end products
+- `*.SCPairs` - List of self-circle products
+- `*.REPairs` - List of religation products
+- `*.FiltPairs` - List of filtered pairs
+- `*RSstat` - Statistics of the number of read pairs falling in each category
+
+Of note, these results are saved only if `--save_pairs_intermediates` is used.  
+The `validPairs` are stored using a simple tab-delimited text format:
+
+```bash
+read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 /
+strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1
+/ mapping qual R2
+```
+
+The ligation efficiency can be assessed using the filtering of valid and
+invalid pairs. As the ligation is a random process, 25% of each valid ligation
+class is expected. In the same way, a high level of dangling-end or self-circle
+read pairs is associated with a low quality experiment, and reveals a problem
+during the digestion, fill-in or ligation steps.
+
+In the context of Hi-C protocol without restriction enzyme, this analysis step
+is skipped. The aligned pairs are therefore directly used to generate the
+contact maps. A filter of the short range contact (typically <1kb) is
+recommended as these pairs are likely to be self-ligation products.
+
+#### Duplicates removal
+
+Note that `validPairs` files are generated per read chunk (and saved only if
+`--save_pairs_intermediates` is specified).
+These files are then merged in the `allValidPairs` file, and duplicates are
+removed (see `--keep_dups` to disable duplicates filtering).
+
+**Output directory: `results/hicpro/valid_pairs`**
+
+- `*allValidPairs` - combined valid pairs from all read chunks
+
+Additional quality controls such as fragment size distribution can be extracted
+from the list of valid interaction products.
+We usually expect to see a distribution centered around 300 bp which corresponds
+to the paired-end insert size commonly used.
+The fraction of duplicates is also presented. A high level of duplication
+indicates a poor molecular complexity and a potential PCR bias.
+Finally, an important metric is to look at the fraction of intra and
+inter-chromosomal interactions, as well as long range (>20kb) versus short
+range (<20kb) intra-chromosomal interactions.
+
+#### Pairs file
+
+`.pairs` is a standard tabular format proposed by the 4DN Consortium
+for storing DNA contacts detected in a Hi-C experiment
+(see https://pairtools.readthedocs.io/en/latest/formats.html).
+This format is the entry point of the downstream steps of the pipeline after
+detection of valid pairs.
+
+**Output directory: `results/hicpro/valid_pairs/pairix`**
+
+- `*pairix` - compressed and indexed pairs file
+
+#### Statistics
+
+Various statistics files are generated all along the data processing.
+All results are available in `results/hicpro/stats`.
+
+**Output directory: `results/hicpro/stats`**
+
+- \*mapstat - mapping statistics per read mate
+- \*pairstat - R1/R2 pairing statistics
+- \*RSstat - Statistics of the number of read pairs falling in each category
+- \*mergestat - statistics about duplicates removal and valid pairs information
+
+#### Contact maps
+
+Intra and inter-chromosomal contact maps are built for all specified resolutions.
+The genome is split into bins of equal size. Each valid interaction is
+associated with the genomic bins to generate the raw maps.
+In addition, Hi-C data can contain several sources of biases which has to be
+corrected.
+The HiC-Pro workflow uses the [iced](https://github.com/hiclib/iced) and
+[Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286)
+python package which proposes a fast implementation of the original ICE
+normalisation algorithm (Imakaev et al. 2012), making the assumption of equal
+visibility of each fragment.
+
+Importantly, the HiC-Pro maps are generated only if the `--hicpro_maps` option
+is specified on the command line.
+
+**Output directory: `results/hicpro/matrix`**
+
+- `*.matrix` - genome-wide contact maps
+- `*_iced.matrix` - genome-wide iced contact maps
+
+The contact maps are generated for all specified resolutions
+(see `--bin_size` argument).  
+A contact map is defined by :
+
+- A list of genomic intervals related to the specified resolution (BED format).
+- A matrix, stored as standard triplet sparse format (i.e. list format).
+
+Based on the observation that a contact map is symmetric and usually sparse,
+only non-zero values are stored for half of the matrix. The user can specify
+whether the 'upper', 'lower' or 'complete' matrix has to be stored. The 'asis'
+option allows to store the contacts as they are observed from the valid pairs
+files.
+
+```bash
+   A   B   10
+   A   C   23
+   B   C   24
+   (...)
+```
+
+This format is memory efficient, and is compatible with several software for
+downstream analysis.
+
+## Hi-C contact maps
+
+Contact maps are usually stored as simple txt (`HiC-Pro`), .hic (`Juicer/Juicebox`) and .(m)cool (`cooler/Higlass`) formats.
+The .cool and .hic format are compressed and indexed and usually much more efficient than the txt format.  
+In the current workflow, we propose to use the `cooler` format as a standard to build the raw and normalised maps
+after valid pairs detection as it is used by several downstream analysis and visualisation tools.
+
+Raw contact maps are therefore in **`results/contact_maps/raw`** which contains the different maps in `txt` and `cool` formats, at various resolutions.
+Normalised contact maps are stored in **`results/contact_maps/norm`** which contains the different maps in `txt`, `cool`, and `mcool` format.
+The bin coordinates used for all resolutions are available in **`results/contact_maps/bins`**.
+
+Note that `txt` contact maps generated with `cooler` are identical to those generated by `HiC-Pro`.
+However, differences can be observed on the normalised contact maps as the balancing algorithm is not exactly the same.
+
+## Downstream analysis
+
+Downstream analysis are performed from `cool` files at specified resolution.
+
+### Distance decay
+
+The distance decay plot shows the relationship between contact frequencies and genomic distance. It gives a good indication of the compaction of the genome.
+According to the organism, the slope of the curve should fit the expectation of polymer physics models.
+
+The results generated with the `HiCExplorer hicPlotDistVsCounts` tool (plot and table) are available in the **`results/dist_decay/`** folder.
+
+### Compartments calling
+
+Compartments calling is one of the most common analysis which aims at detecting A (open, active) / B (close, inactive) compartments.
+In the first studies on the subject, the compartments were called at high/medium resolution (1000000 to 250000) which is enough to call A/B compartments.
+Analysis at higher resolution has shown that these two main types of compartments can be further divided into compartments subtypes.
+
+Although different methods have been proposed for compartment calling, the standard remains the eigen vector decomposition from the normalised correlation maps.
+Here, we use the implementation available in the [`cooltools`](https://cooltools.readthedocs.io/en/latest) package.
+
+Results are available in **`results/compartments/`** folder and include :
+
+- `*cis.vecs.tsv`: eigenvectors decomposition along the genome
+- `*cis.lam.txt`: eigenvalues associated with the eigenvectors
+
+### TADs calling
 
-[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences. For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/).
+TADs have been described as functional units of the genome.
+While contacts between genes and regulatory elements can occur within a single TAD, contacts between TADs are much less frequent, mainly due to the presence of an insulation protein (such as CTCF) at their boundaries. Looking at Hi-C maps, TADs look like triangles around the diagonal. According to the contact map resolutions, TADs appear as hierarchical structures with a median size around 1Mb (in mammals), as well as smaller structures usually called sub-TADs of smaller size.
 
-![MultiQC - FastQC sequence counts plot](images/mqc_fastqc_counts.png)
+TADs calling remains a challenging task, and even if many methods have been proposed in the last decade, little overlap has been found between their results.
 
-![MultiQC - FastQC mean quality scores plot](images/mqc_fastqc_quality.png)
+Currently, the pipeline proposes two approaches :
 
-![MultiQC - FastQC adapter content plot](images/mqc_fastqc_adapter.png)
+- Insulation score using the [`cooltools`](https://cooltools.readthedocs.io/en/latest/cli.html#cooltools-diamond-insulation) package. Results are available in **`results/tads/insulation`**.
+- [`HiCExplorer TADs calling`](https://hicexplorer.readthedocs.io/en/latest/content/tools/hicFindTADs.html). Results are available at **`results/tads/hicexplorer`**.
 
-> **NB:** The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality.
+Usually, TADs results are presented as simple BED files, or bigWig files, with the position of boundaries along the genome.
 
 ### MultiQC
 
@@ -49,7 +283,7 @@ The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes d
 
 </details>
 
-[MultiQC](http://multiqc.info) is a visualization tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory.
+[MultiQC](http://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory.
 
 Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQC. The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability. For more information about how to use MultiQC reports, see <http://multiqc.info>.
 
diff --git a/docs/usage.md b/docs/usage.md
index 506b575a76a33e73e05b68a5ce33d70c4ceec773..4ad48da5087a541195d98f097c625aa22b5a6062 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -6,8 +6,6 @@
 
 ## Introduction
 
-<!-- TODO nf-core: Add documentation about anything specific to running your pipeline. For general topics, please point to (and add to) the main nf-core website. -->
-
 ## Samplesheet input
 
 You will need to create a samplesheet with information about the samples you would like to analyse before running the pipeline. Use this parameter to specify its location. It has to be a comma-separated file with 3 columns, and a header row as shown in the examples below.
@@ -18,7 +16,7 @@ You will need to create a samplesheet with information about the samples you wou
 
 ### Multiple runs of the same sample
 
-The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. The pipeline will concatenate the raw reads before performing any downstream analysis. Below is an example for the same sample sequenced across 3 lanes:
+The `sample` identifiers have to be the same when you have re-sequenced the same sample more than once e.g. to increase sequencing depth. Below is an example for the same sample sequenced across 3 lanes:
 
 ```console
 sample,fastq_1,fastq_2
@@ -29,19 +27,13 @@ CONTROL_REP1,AEG588A1_S1_L004_R1_001.fastq.gz,AEG588A1_S1_L004_R2_001.fastq.gz
 
 ### Full samplesheet
 
-The pipeline will auto-detect whether a sample is single- or paired-end using the information provided in the samplesheet. The samplesheet can have as many columns as you desire, however, there is a strict requirement for the first 3 columns to match those defined in the table below.
-
-A final samplesheet file consisting of both single- and paired-end data may look something like the one below. This is for 6 samples, where `TREATMENT_REP3` has been sequenced twice.
+The `nf-core-hic` pipeline is designed to work only with paired-end data. The samplesheet can have as many columns as you desire, however, there is a strict requirement for the first 3 columns to match those defined in the table below.
 
 ```console
 sample,fastq_1,fastq_2
-CONTROL_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
-CONTROL_REP2,AEG588A2_S2_L002_R1_001.fastq.gz,AEG588A2_S2_L002_R2_001.fastq.gz
-CONTROL_REP3,AEG588A3_S3_L002_R1_001.fastq.gz,AEG588A3_S3_L002_R2_001.fastq.gz
-TREATMENT_REP1,AEG588A4_S4_L003_R1_001.fastq.gz,
-TREATMENT_REP2,AEG588A5_S5_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L003_R1_001.fastq.gz,
-TREATMENT_REP3,AEG588A6_S6_L004_R1_001.fastq.gz,
+SAMPLE_REP1,AEG588A1_S1_L002_R1_001.fastq.gz,AEG588A1_S1_L002_R2_001.fastq.gz
+SAMPLE_REP2,AEG588A2_S2_L002_R1_001.fastq.gz,AEG588A2_S2_L002_R2_001.fastq.gz
+SAMPLE_REP3,AEG588A3_S3_L002_R1_001.fastq.gz,AEG588A3_S3_L002_R2_001.fastq.gz
 ```
 
 | Column    | Description                                                                                                                                                                            |
@@ -60,7 +52,8 @@ The typical command for running the pipeline is as follows:
 nextflow run nf-core/hic --input samplesheet.csv --outdir <OUTDIR> --genome GRCh37 -profile docker
 ```
 
-This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.
+This will launch the pipeline with the `docker` configuration profile.
+See below for more information about profiles.
 
 Note that the pipeline will create the following files in your working directory:
 
@@ -116,20 +109,31 @@ To further assist in reproducbility, you can use share and re-use [parameter fil
 
 ## Core Nextflow arguments
 
-> **NB:** These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen).
+> **NB:** These options are part of Nextflow and use a _single_ hyphen
+> (pipeline parameters use a double-hyphen).
 
 ### `-profile`
 
-Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.
+Use this parameter to choose a configuration profile. Profiles can give
+configuration presets for different compute environments.
 
 Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Apptainer, Conda) - see below.
 
-> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.
+> We highly recommend the use of Docker or Singularity containers for full
+> pipeline reproducibility, however when this is not possible, Conda is also supported.
 
-The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to see if your system is available in these configs please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
+The pipeline also dynamically loads configurations from
+[https://github.com/nf-core/configs](https://github.com/nf-core/configs)
+when it runs, making multiple config profiles for various institutional
+clusters available at run time.
+For more information and to see if your system is available in these
+configs please see
+the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
 
-Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important!
-They are loaded in sequence, so later profiles can overwrite earlier profiles.
+Note that multiple profiles can be loaded, for example: `-profile test,docker` -
+the order of arguments is important!
+They are loaded in sequence, so later profiles can overwrite
+earlier profiles.
 
 If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended, since it can lead to different results on different machines dependent on the computer enviroment.
 
@@ -159,7 +163,9 @@ You can also supply a run name to resume a specific run: `-resume [run-name]`. U
 
 ### `-c`
 
-Specify the path to a specific config file (this is a core Nextflow command). See the [nf-core website documentation](https://nf-co.re/usage/configuration) for more information.
+Specify the path to a specific config file (this is a core Nextflow command).
+See the [nf-core website documentation](https://nf-co.re/usage/configuration)
+for more information.
 
 ## Custom configuration
 
@@ -187,7 +193,9 @@ In most cases, you will only need to create a custom config as a one-off but if
 
 See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information about creating your own configuration files.
 
-If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs).
+If you have any questions or issues please send us a message on
+[Slack](https://nf-co.re/join/slack) on the
+[`#configs` channel](https://nfcore.slack.com/channels/configs).
 
 ## Azure Resource Requests
 
@@ -199,18 +207,546 @@ For a thorough list, please refer the [Azure Sizes for virtual machines in Azure
 
 ## Running in the background
 
-Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished.
+Nextflow handles job submissions and supervises the running jobs.
+The Nextflow process must run until the pipeline is finished.
 
-The Nextflow `-bg` flag launches Nextflow in the background, detached from your terminal so that the workflow does not stop if you log out of your session. The logs are saved to a file.
+The Nextflow `-bg` flag launches Nextflow in the background, detached from your terminal
+so that the workflow does not stop if you log out of your session. The logs are
+saved to a file.
 
-Alternatively, you can use `screen` / `tmux` or similar tool to create a detached session which you can log back into at a later time.
-Some HPC setups also allow you to run nextflow within a cluster job submitted your job scheduler (from where it submits more jobs).
+Alternatively, you can use `screen` / `tmux` or a similar tool to create a detached
+session which you can log back into at a later time.
+Some HPC setups also allow you to run nextflow within a cluster job submitted
+by your job scheduler (from where it submits more jobs).
 
 ## Nextflow memory requirements
 
-In some cases, the Nextflow Java virtual machines can start to request a large amount of memory.
-We recommend adding the following line to your environment to limit this (typically in `~/.bashrc` or `~./bash_profile`):
+In some cases, the Nextflow Java virtual machines can start to request a
+large amount of memory.
+We recommend adding the following line to your environment to limit this
+(typically in `~/.bashrc` or `~/.bash_profile`):
 
 ```bash
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
+
+## Use case
+
+### Hi-C digestion protocol
+
+Here is a command line example for the standard DpnII digestion protocol.
+Alignment will be performed on the `mm10` genome with default parameters.
+Multi-hits will not be considered and duplicates will be removed.
+Note that by default, no filters are applied on DNA and restriction fragment sizes.
+
+```bash
+nextflow run main.nf --input './*_R{1,2}.fastq.gz' --genome 'mm10' --digestion 'dnpii'
+```
+
+### DNase Hi-C protocol
+
+Here is a command line example for the DNase protocol.
+Alignment will be performed on the `mm10` genome with default parameters.
+Multi-hits will not be considered and duplicates will be removed.
+Contacts involving fragments separated by less than 1000bp will be discarded.
+
+```bash
+nextflow run main.nf --input './*_R{1,2}.fastq.gz' --genome 'mm10' --dnase --min_cis 1000
+```
+
+## Inputs
+
+### `--input`
+
+Use this to specify the location of your input FastQ files. For example:
+
+```bash
+--input 'path/to/data/sample_*_{1,2}.fastq'
+```
+
+Please note the following requirements:
+
+1. The path must be enclosed in quotes
+2. The path must have at least one `*` wildcard character
+3. When using the pipeline with paired end data, the path must use `{1,2}`
+   notation to specify read pairs.
+
+If left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`
+
+Note that the Hi-C data analysis workflow requires paired-end data.
+
+## Reference genomes
+
+The pipeline config files come bundled with paths to the Illumina iGenomes reference
+index files. If running with docker or AWS, the configuration is set up to use the
+[AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) resource.
+
+### `--genome` (using iGenomes)
+
+There are many different species supported in the iGenomes references. To run
+the pipeline, you must specify which to use with the `--genome` flag.
+
+You can find the keys to specify the genomes in the
+[iGenomes config file](https://github.com/nf-core/hic/blob/master/conf/igenomes.config).
+
+### `--fasta`
+
+If you prefer, you can specify the full path to your reference genome when you
+run the pipeline:
+
+```bash
+--fasta '[path to Fasta reference]'
+```
+
+### `--bwt2_index`
+
+The bowtie2 indexes are required to align the data with the HiC-Pro workflow. If the
+`--bwt2_index` is not specified, the pipeline will either use the iGenomes
+bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly
+(see `--fasta` option)
+
+```bash
+--bwt2_index '[path to bowtie2 index]'
+```
+
+### `--chromosome_size`
+
+The Hi-C pipeline also requires a two-column text file with the
+chromosome name and the chromosome size (tab-separated).
+If not specified, this file will be automatically created by the pipeline.
+In the latter case, the `--fasta` reference genome has to be specified.
+
+```bash
+   chr1    249250621
+   chr2    243199373
+   chr3    198022430
+   chr4    191154276
+   chr5    180915260
+   chr6    171115067
+   chr7    159138663
+   chr8    146364022
+   chr9    141213431
+   chr10   135534747
+   (...)
+```
+
+```bash
+--chromosome_size '[path to chromosome size file]'
+```
+
+### `--restriction_fragments`
+
+Finally, Hi-C experiments based on restriction enzyme digestion require a BED
+file with coordinates of restriction fragments.
+
+```bash
+   chr1   0       16007   HIC_chr1_1    0   +
+   chr1   16007   24571   HIC_chr1_2    0   +
+   chr1   24571   27981   HIC_chr1_3    0   +
+   chr1   27981   30429   HIC_chr1_4    0   +
+   chr1   30429   32153   HIC_chr1_5    0   +
+   chr1   32153   32774   HIC_chr1_6    0   +
+   chr1   32774   37752   HIC_chr1_7    0   +
+   chr1   37752   38369   HIC_chr1_8    0   +
+   chr1   38369   38791   HIC_chr1_9    0   +
+   chr1   38791   39255   HIC_chr1_10   0   +
+   (...)
+```
+
+If not specified, this file will be automatically created by the pipeline.
+In this case, the `--fasta` reference genome will be used.
+Note that the `--digestion` or `--restriction_site` parameter is mandatory to create this file.
+
+## Hi-C specific options
+
+The following options are defined in the `nextflow.config` file, and can be
+updated either using a custom configuration file (see `-c` option) or using
+command line parameters.
+
+### HiC-pro mapping
+
+The reads mapping is currently based on the two-steps strategy implemented in
+the HiC-pro pipeline. The idea is to first align reads from end-to-end.
+Reads that do not align are then trimmed at the ligation site, and their 5'
+end is re-aligned to the reference genome.
+Note that the default options are quite stringent, and can be updated according
+to the reads quality or the reference genome.
+
+#### `--bwt2_opts_end2end`
+
+Bowtie2 alignment option for end-to-end mapping.
+Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
+
+```bash
+--bwt2_opts_end2end '[Options for bowtie2 step1 mapping on full reads]'
+```
+
+#### `--bwt2_opts_trimmed`
+
+Bowtie2 alignment option for trimmed reads mapping (step 2).
+Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
+
+```bash
+--bwt2_opts_trimmed '[Options for bowtie2 step2 mapping on trimmed reads]'
+```
+
+#### `--min_mapq`
+
+Minimum mapping quality. Reads with lower quality are discarded. Default: 10
+
+```bash
+--min_mapq '[Minimum quality value]'
+```
+
+### Digestion Hi-C
+
+#### `--digestion`
+
+This parameter allows to automatically set the `--restriction_site` and
+`--ligation_site` parameter according to the restriction enzyme you used.
+Available keywords are 'hindiii', 'dpnii', 'mboi', 'arima'.
+
+```bash
+--digestion 'hindiii'
+```
+
+#### `--restriction_site`
+
+If the restriction enzyme is not available through the `--digestion`
+parameter, you can also define manually the restriction motif(s) for
+Hi-C digestion protocol.
+The restriction motif(s) is(are) used to generate the list of restriction fragments.
+The precise cutting site of the restriction enzyme has to be specified using
+the '^' character. Default: 'A^AGCTT'
+Here are a few examples:
+
+- MboI: ^GATC
+- DpnII: ^GATC
+- HindIII: A^AGCTT
+- ARIMA kit: ^GATC,G^ANTC
+
+Note that multiple restriction motifs can be provided (comma-separated) and
+that 'N' bases are supported.
+
+```bash
+--restriction_site '[Cutting motif]'
+```
+
+#### `--ligation_site`
+
+Ligation motif after reads ligation. This motif is used for reads trimming and
+depends on the fill in strategy.
+Note that multiple ligation sites can be specified (comma-separated) and that
+'N' base is interpreted and replaced by 'A','C','G','T'.
+Default: 'AAGCTAGCTT'
+
+```bash
+--ligation_site '[Ligation motif]'
+```
+
+Example of the ARIMA kit: GATCGATC,GANTGATC,GANTANTC,GATCANTC
+
+### DNAse Hi-C
+
+#### `--dnase`
+
+In DNAse Hi-C mode, all options related to digestion Hi-C
+(see previous section) are ignored.
+In this case, it is highly recommended to use the `--min_cis_dist` parameter
+to remove spurious ligation products.
+
+```bash
+--dnase
+```
+
+### HiC-pro processing
+
+#### `--min_restriction_fragment_size`
+
+Minimum size of restriction fragments to consider for the Hi-C processing.
+Default: '0' - no filter
+
+```bash
+--min_restriction_fragment_size '[numeric]'
+```
+
+#### `--max_restriction_fragment_size`
+
+Maximum size of restriction fragments to consider for the Hi-C processing.
+Default: '0' - no filter
+
+```bash
+--max_restriction_fragment_size '[numeric]'
+```
+
+#### `--min_insert_size`
+
+Minimum reads insert size. Shorter 3C products are discarded.
+Default: '0' - no filter
+
+```bash
+--min_insert_size '[numeric]'
+```
+
+#### `--max_insert_size`
+
+Maximum reads insert size. Longer 3C products are discarded.
+Default: '0' - no filter
+
+```bash
+--max_insert_size '[numeric]'
+```
+
+#### `--min_cis_dist`
+
+Filter short range contact below the specified distance.
+Mainly useful for DNase Hi-C. Default: '0'
+
+```bash
+--min_cis_dist '[numeric]'
+```
+
+#### `--keep_dups`
+
+If specified, duplicate reads are not discarded before building contact maps.
+
+```bash
+--keep_dups
+```
+
+#### `--keep_multi`
+
+If specified, reads that aligned multiple times on the genome are not discarded.
+Note the default mapping options are based on random hit assignment, meaning
+that only one position is kept per read.
+Note that in this case the `--min_mapq` parameter is ignored.
+
+```bash
+--keep_multi
+```
+
+## Genome-wide contact maps
+
+Once the list of valid pairs is available, the standard is now to move on the `cooler`
+framework to build the raw and balanced contact maps in txt and (m)cool formats.
+
+### `--bin_size`
+
+Resolution of contact maps to generate (comma-separated).
+Default:'1000000,500000'
+
+```bash
+--bin_size '[string]'
+```
+
+### `--res_zoomify`
+
+Define the maximum resolution to reach when zoomify the cool contact maps.
+Default:'5000'
+
+```bash
+--res_zoomify '[string]'
+```
+
+### HiC-Pro contact maps
+
+By default, the contact maps are now generated with the `cooler` framework.
+However, for backward compatibility, the raw and normalized maps can still be generated
+by HiC-pro if the `--hicpro_maps` parameter is set.
+
+#### `--hicpro_maps`
+
+If specified, the raw and ICE normalized contact maps will be generated by HiC-Pro.
+
+```bash
+--hicpro_maps
+```
+
+#### `--ice_max_iter`
+
+Maximum number of iteration for ICE normalization.
+Default: 100
+
+```bash
+--ice_max_iter '[numeric]'
+```
+
+#### `--ice_filter_low_count_perc`
+
+Define which percentage of bins with low counts should be forced to zero.
+Default: 0.02
+
+```bash
+--ice_filter_low_count_perc '[numeric]'
+```
+
+#### `--ice_filter_high_count_perc`
+
+Define which percentage of bins with high counts should be discarded before
+normalization. Default: 0
+
+```bash
+--ice_filter_high_count_perc '[numeric]'
+```
+
+#### `--ice_eps`
+
+The relative increment in the results before declaring convergence for ICE
+normalization. Default: 0.1
+
+```bash
+--ice_eps '[numeric]'
+```
+
+## Downstream analysis
+
+### Additional quality controls
+
+#### `--res_dist_decay`
+
+Generates distance vs Hi-C counts plots at a given resolution using `HiCExplorer`.
+Several resolutions can be specified (comma-separated). Default: '250000'
+
+```bash
+--res_dist_decay '[string]'
+```
+
+### Compartment calling
+
+Call open/close compartments for each chromosome, using the `cooltools` command.
+
+#### `--res_compartments`
+
+Resolution to call the chromosome compartments (comma-separated).
+Default: '250000'
+
+```bash
+--res_compartments '[string]'
+```
+
+### TADs calling
+
+#### `--tads_caller`
+
+TADs calling can be performed using different approaches.
+Currently available options are `insulation` and `hicexplorer`.
+Note that all options can be specified (comma-separated).
+Default: 'insulation'
+
+```bash
+--tads_caller '[string]'
+```
+
+#### `--res_tads`
+
+Resolution to run the TADs calling analysis (comma-separated).
+Default: '40000,20000'
+
+```bash
+--res_tads '[string]'
+```
+
+## Inputs/Outputs
+
+### `--split_fastq`
+
+By default, the nf-core Hi-C pipeline expects one read pair per sample.
+However, for large Hi-C datasets, processing single fastq files can be very
+time consuming.
+The `--split_fastq` option allows to automatically split input read pairs
+into chunks of reads of size `--fastq_chunks_size` (Default : 20000000). In this case, all chunks will be processed in parallel
+and merged before generating the contact maps, thus leading to a significant
+increase of processing performance.
+
+```bash
+--split_fastq --fastq_chunks_size '[numeric]'
+```
+
+### `--save_reference`
+
+If specified, annotation files automatically generated from the `--fasta` file
+are exported in the results folder. Default: false
+
+```bash
+--save_reference
+```
+
+### `--save_aligned_intermediates`
+
+If specified, all intermediate mapping files are saved and exported in the
+results folder. Default: false
+
+```bash
+--save_aligned_intermediates
+```
+
+### `--save_interaction_bam`
+
+If specified, write a BAM file with all classified reads (valid pairs,
+dangling end, self-circle, etc.) and its tags.
+
+```bash
+--save_interaction_bam
+```
+
+## Skip options
+
+### `--skip_maps`
+
+If defined, the workflow stops with the list of valid interactions, and the
+genome-wide maps are not built. Useful for capture-C analysis. Default: false
+
+```bash
+--skip_maps
+```
+
+### `--skip_balancing`
+
+If defined, the contact maps normalization is not run on the raw contact maps.
+Default: false
+
+```bash
+--skip_balancing
+```
+
+### `--skip_cool`
+
+If defined, cooler files are not generated. Default: false
+
+```bash
+--skip_cool
+```
+
+### `--skip_dist_decay`
+
+Do not run distance decay plots. Default: false
+
+```bash
+--skip_dist_decay
+```
+
+### `--skip_compartments`
+
+Do not call compartments. Default: false
+
+```bash
+--skip_compartments
+```
+
+### `--skip_tads`
+
+Do not call TADs. Default: false
+
+```bash
+--skip_tads
+```
+
+### `--skip_multiQC`
+
+If defined, the MultiQC report is not generated. Default: false
+
+```bash
+--skip_multiQC
+```
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b8abcdfc65aae3942b48c43e1fbb5fe7f2bb8bb1
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,32 @@
+# You can use this file to create a conda environment for this pipeline:
+#   conda env create -f environment.yml
+name: nf-core-hic-2.0.0
+channels:
+  - conda-forge
+  - bioconda
+  - defaults
+dependencies:
+  - conda-forge::python=3.9.12=h9a8a25e_1_cpython
+  - pip=22.0.4=pyhd8ed1ab_0
+  - conda-forge::tbb=2020.2=hc9558a2_0
+  - conda-forge::scipy=1.8.0=py39hee8e79c_1
+  - conda-forge::numpy=1.22.3=py39hc58783e_2
+  - bioconda::iced=0.5.10=py39h919a90d_1
+  - bioconda::bx-python=0.8.13=py39h6471ffd_1
+  - bioconda::pysam=0.19.0=py39h5030a8b_0
+  - conda-forge::pymdown-extensions=7.1=pyh9f0ad1d_0
+  - bioconda::cooler=0.8.11=pyh5e36f6f_1
+  - bioconda::cooltools=0.5.1=py39h5371cbf_1
+  - bioconda::bowtie2=2.4.5=py39hd2f7db1_2
+  - bioconda::samtools=1.15.1=h1170115_0
+  - bioconda::multiqc=1.12=pyhdfd78af_0
+  - bioconda::fastqc=0.11.9=hdfd78af_1
+
+  ## Dev tools
+  - bioconda::hicexplorer=3.7.2=pyhdfd78af_1
+  - bioconda::bioconductor-hitc=1.38.0=r41hdfd78af_0
+  - conda-forge::r-optparse=1.7.1=r41hc72bb7e_0
+  - bioconda::ucsc-bedgraphtobigwig=377=ha8a8165_3
+  - conda-forge::cython=0.29.28=py39h5a03fae_2
+  - pip:
+      - fanc==0.9.23
diff --git a/lib/WorkflowHic.groovy b/lib/WorkflowHic.groovy
index e51a467a6c8bebfb8ca4c605451d73afcb80c821..f14c26c5b91565eeca44afa6d51e3442e213bf16 100755
--- a/lib/WorkflowHic.groovy
+++ b/lib/WorkflowHic.groovy
@@ -13,10 +13,16 @@ class WorkflowHic {
     public static void initialise(params, log) {
         genomeExistsError(params, log)
 
-
-        if (!params.fasta) {
-            Nextflow.error "Genome fasta file not specified with e.g. '--fasta genome.fa' or via a detectable config file."
+        // digestion parameters
+        if (params.digest && params.digestion && !params.digest.containsKey(params.digestion)) {
+            Nextflow.error "Unknown digestion protocol. Currently, the available digestion options are ${params.digest.keySet().join(", ")}. Please set manually the '--restriction_site' and '--ligation_site' parameters."
         }
+
+        // Check Digestion or DNase Hi-C mode
+        //if (!params.dnase && !params.ligation_site) {
+        //  Nextflow.error "Ligation motif not found. Please either use the `--digestion` parameters or specify the `--restriction_site` and `--ligation_site`. For DNase Hi-C, please use '--dnase' option"
+        //}
+
     }
 
     //
diff --git a/lib/WorkflowMain.groovy b/lib/WorkflowMain.groovy
index 978ce773b7b2536afd1525f678e360a7e4336ea6..e4bce38300fadf0f573d90304d0ae40606327a3a 100755
--- a/lib/WorkflowMain.groovy
+++ b/lib/WorkflowMain.groovy
@@ -11,9 +11,8 @@ class WorkflowMain {
     //
     public static String citation(workflow) {
         return "If you use ${workflow.manifest.name} for your analysis please cite:\n\n" +
-            // TODO nf-core: Add Zenodo DOI for pipeline after first release
-            //"* The pipeline\n" +
-            //"  https://doi.org/10.5281/zenodo.XXXXXXX\n\n" +
+            "* The pipeline\n" +
+            "  https://doi.org/10.5281/zenodo.2669513\n\n" +
             "* The nf-core framework\n" +
             "  https://doi.org/10.1038/s41587-020-0439-x\n\n" +
             "* Software dependencies\n" +
diff --git a/main.nf b/main.nf
index 7dd9be5444a4e18d22ff2c8215f7038b167062a0..82aaf0f200b77db6710472f821c715376d2e5668 100644
--- a/main.nf
+++ b/main.nf
@@ -18,6 +18,7 @@ nextflow.enable.dsl = 2
 */
 
 params.fasta = WorkflowMain.getGenomeAttribute(params, 'fasta')
+params.bwt2_index = WorkflowMain.getGenomeAttribute(params, 'bowtie2')
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/modules.json b/modules.json
index 072b219c4dec46f9322b00995de681988dceef6c..6d7a0306685215f83fe5c69f51f6f54c1927fa04 100644
--- a/modules.json
+++ b/modules.json
@@ -5,22 +5,60 @@
         "https://github.com/nf-core/modules.git": {
             "modules": {
                 "nf-core": {
+                    "bowtie2/align": {
+                        "branch": "master",
+                        "git_sha": "603ecbd9f45300c9788f197d2a15a005685b4220",
+                        "installed_by": ["modules"]
+                    },
+                    "bowtie2/build": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/balance": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/cload": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/dump": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/makebins": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
+                    "cooler/zoomify": {
+                        "branch": "master",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
+                        "installed_by": ["modules"]
+                    },
                     "custom/dumpsoftwareversions": {
                         "branch": "master",
-                        "git_sha": "76cc4938c1f6ea5c7d83fed1eeffc146787f9543",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
                         "installed_by": ["modules"]
                     },
-                    "fastqc": {
+                    "custom/getchromsizes": {
                         "branch": "master",
-                        "git_sha": "c8e35eb2055c099720a75538d1b8adb3fb5a464c",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
                         "installed_by": ["modules"]
                     },
-                    "multiqc": {
+                    "fastqc": {
                         "branch": "master",
-                        "git_sha": "f2d63bd5b68925f98f572eed70993d205cc694b7",
+                        "git_sha": "911696ea0b62df80e900ef244d7867d177971f73",
                         "installed_by": ["modules"]
                     }
                 }
+            },
+            "subworkflows": {
+                "nf-core": {}
             }
         }
     }
diff --git a/modules/local/cooltools/eigscis.nf b/modules/local/cooltools/eigscis.nf
new file mode 100644
index 0000000000000000000000000000000000000000..873bd243cd02102f327cba853197d39bc0ea860e
--- /dev/null
+++ b/modules/local/cooltools/eigscis.nf
@@ -0,0 +1,36 @@
+/*
+ * cooltools - call_compartments
+ */
+
+process COOLTOOLS_EIGSCIS {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    conda "bioconda::cooltools=0.5.1 bioconda::ucsc-bedgraphtobigwig=377"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c81d8d6b6acf4714ffaae1a274527a41958443f6:cc7ea58b8cefc76bed985dcfe261cb276ed9e0cf-0' :
+        'biocontainers/mulled-v2-c81d8d6b6acf4714ffaae1a274527a41958443f6:cc7ea58b8cefc76bed985dcfe261cb276ed9e0cf-0' }"
+
+    input:
+    tuple val(meta), path(cool), val(resolution)
+    path(fasta)
+    path(chrsize)
+
+    output:
+    path("*compartments*"), emit: results
+    path("versions.yml"), emit: versions
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    cooltools genome binnify --all-names ${chrsize} ${resolution} > genome_bins.txt
+    cooltools genome gc genome_bins.txt ${fasta} > genome_gc.txt
+    cooltools eigs-cis ${args} -o ${prefix}_compartments ${cool}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooltools: \$(cooltools --version 2>&1 | grep version | sed 's/cooltools, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/cooltools/insulation.nf b/modules/local/cooltools/insulation.nf
new file mode 100644
index 0000000000000000000000000000000000000000..af53529ef80baa12dbd81bd0a2b8fa3ec2122cfa
--- /dev/null
+++ b/modules/local/cooltools/insulation.nf
@@ -0,0 +1,32 @@
+/*
+ * Cooltools - diamond-insulation
+ */
+
+process COOLTOOLS_INSULATION {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    conda "bioconda::cooltools=0.5.1"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooltools:0.5.1--py37h37892f8_0' :
+        'biocontainers/cooltools:0.5.1--py37h37892f8_0' }"
+
+    input:
+    tuple val(meta), path(cool)
+
+    output:
+    path("*tsv"), emit:tsv
+    path("versions.yml"), emit:versions
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    cooltools insulation ${cool} ${args} > ${prefix}_insulation.tsv
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooltools: \$(cooltools --version 2>&1 | sed 's/cooltools, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicexplorer/hicFindTADs.nf b/modules/local/hicexplorer/hicFindTADs.nf
new file mode 100644
index 0000000000000000000000000000000000000000..d86dc837e29e56aa4ba88cd2c66962444046647b
--- /dev/null
+++ b/modules/local/hicexplorer/hicFindTADs.nf
@@ -0,0 +1,34 @@
+/*
+ * hicexplorer - hicFindTADs
+ */
+
+process HIC_FIND_TADS {
+    label 'process_medium'
+
+    conda "bioconda::hicexplorer=3.7.2"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/hicexplorer:3.7.2--pyhdfd78af_1' :
+        'biocontainers/hicexplorer:3.7.2--pyhdfd78af_1' }"
+
+    input:
+    tuple val(meta), path(cool)
+
+    output:
+    path("*hicfindtads*"), emit:results
+    path("versions.yml"), emit:versions
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    hicFindTADs --matrix ${cool} \
+        --outPrefix ${prefix}_hicfindtads \
+        ${args} \
+        --numberOfProcessors ${task.cpus}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        hicexplorer: \$(hicFindTADs --version 2>&1 | sed 's/hicFindTADs //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicexplorer/hicPlotDistVsCounts.nf b/modules/local/hicexplorer/hicPlotDistVsCounts.nf
new file mode 100644
index 0000000000000000000000000000000000000000..1143a05d3767ba4a9289e6f423cb7dc768d3d197
--- /dev/null
+++ b/modules/local/hicexplorer/hicPlotDistVsCounts.nf
@@ -0,0 +1,34 @@
+/*
+ * hicexplorer - Genomic distance/counts plots
+ */
+
+process HIC_PLOT_DIST_VS_COUNTS {
+    tag "${meta.id}"
+    label 'process_medium'
+
+    conda "bioconda::hicexplorer=3.7.2"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/hicexplorer:3.7.2--pyhdfd78af_1' :
+        'biocontainers/hicexplorer:3.7.2--pyhdfd78af_1' }"
+
+    input:
+    tuple val(meta), path(cool)
+
+    output:
+    path("*distcount*"), emit:results
+    path("versions.yml"), emit:versions
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    hicPlotDistVsCounts --matrices ${cool} \
+                        --plotFile ${prefix}_distcount.png \
+                        --outFileData ${prefix}_distcount.txt
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        hicexplorer: \$(hicPlotDistVsCounts --version 2>&1 | sed 's/hicPlotDistVsCounts //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/bowtie2_merge.nf b/modules/local/hicpro/bowtie2_merge.nf
new file mode 100644
index 0000000000000000000000000000000000000000..64aa6e6c05fd9e729ac26a0704f33b802f7dbb0c
--- /dev/null
+++ b/modules/local/hicpro/bowtie2_merge.nf
@@ -0,0 +1,48 @@
+process MERGE_BOWTIE2{
+    tag "${meta.id}"
+    label 'process_medium'
+
+    conda "bioconda::samtools=1.15.1"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
+        'biocontainers/samtools:1.15.1--h1170115_0' }"
+
+    input:
+    tuple val(meta), path(bam1), path(bam2)
+
+    output:
+    tuple val(meta), path("${prefix}_bwt2merged.bam"), emit: bam
+    tuple val(meta), path("${prefix}.mapstat"), emit: stats
+    path("versions.yml"), emit: versions
+
+    script:
+    prefix = task.ext.prefix ?: "${meta.id}"
+    tag = meta.mates
+    """
+    samtools merge -@ ${task.cpus} \\
+        -f ${prefix}_bwt2merged.bam \\
+        ${bam1} ${bam2}
+
+    samtools sort -@ ${task.cpus} -m 800M \\
+        -n \\
+        -o ${prefix}_bwt2merged.sorted.bam \\
+        ${prefix}_bwt2merged.bam
+
+    mv ${prefix}_bwt2merged.sorted.bam ${prefix}_bwt2merged.bam
+
+    echo "## ${prefix}" > ${prefix}.mapstat
+    echo -n "total_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+    echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c -F 4 ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+    echo -n "global_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+    echo -n "local_${tag}\t"  >> ${prefix}.mapstat
+    samtools view -c -F 4 ${bam2} >> ${prefix}.mapstat
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/build_contact_maps.nf b/modules/local/hicpro/build_contact_maps.nf
new file mode 100644
index 0000000000000000000000000000000000000000..271913ec49b1d362b6b0fb065506961d85ad82fc
--- /dev/null
+++ b/modules/local/hicpro/build_contact_maps.nf
@@ -0,0 +1,27 @@
+process BUILD_CONTACT_MAPS{
+    tag "${meta.id}"
+    label 'process_high_memory'
+
+    conda "conda-forge::sed=4.7"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'nf-core/ubuntu:20.04' }"
+
+    input:
+    tuple val(meta), path(vpairs), val(resolution)
+    tuple val(meta2), path(chrsize)
+
+    output:
+    tuple val(meta), val(resolution), path("*.matrix"), path("*.bed"), emit: maps
+
+    script:
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    build_matrix \\
+        --matrix-format upper  \\
+        --binsize ${resolution} \\
+        --chrsizes ${chrsize} \\
+        --ifile ${vpairs} \\
+        --oprefix ${prefix}
+    """
+}
diff --git a/modules/local/hicpro/combine_mates.nf b/modules/local/hicpro/combine_mates.nf
new file mode 100644
index 0000000000000000000000000000000000000000..b0d828e3200bcb00fb4b94dcb071091ae352b7f7
--- /dev/null
+++ b/modules/local/hicpro/combine_mates.nf
@@ -0,0 +1,29 @@
+process COMBINE_MATES {
+    tag "${meta.id}"
+    label 'process_low'
+
+    conda "conda-forge::python=3.9  bioconda::pysam=0.19.0"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
+
+    input:
+    tuple val(meta), path(bam)
+
+    output:
+    tuple val(meta), path("*bwt2pairs.bam"), emit:bam
+    tuple val(meta), path("*.pairstat"), optional:true, emit:stats
+    path("versions.yml"), emit: versions
+
+    script:
+    prefix = task.ext.prefix ?: "${meta.id}"
+    def args = task.ext.args ?: ''
+    """
+    mergeSAM.py -f ${bam[0]} -r ${bam[1]} -o ${prefix}_bwt2pairs.bam ${args}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/dnase_mapping_stats.nf b/modules/local/hicpro/dnase_mapping_stats.nf
new file mode 100644
index 0000000000000000000000000000000000000000..8e85113944f79dc0b155ee9be441b2badf5c0245
--- /dev/null
+++ b/modules/local/hicpro/dnase_mapping_stats.nf
@@ -0,0 +1,31 @@
+process MAPPING_STATS_DNASE {
+    tag "$meta.id"
+    label 'process_medium'
+
+    conda "bioconda::samtools=1.15.1"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/samtools:1.15.1--h1170115_0' :
+        'biocontainers/samtools:1.15.1--h1170115_0' }"
+
+
+    input:
+    tuple val(meta), path(bam)
+
+    output:
+    tuple val(meta), path(bam), emit:bam
+    tuple val(meta), path("${prefix}.mapstat"), emit:stats
+
+    script:
+    prefix = meta.id + "_" + meta.chunk + "_" + meta.mates
+    tag = meta.mates
+    """
+    echo "## ${prefix}" > ${prefix}.mapstat
+    echo -n "total_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c ${bam} >> ${prefix}.mapstat
+    echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c -F 4 ${bam} >> ${prefix}.mapstat
+    echo -n "global_${tag}\t" >> ${prefix}.mapstat
+    samtools view -c -F 4 ${bam} >> ${prefix}.mapstat
+    echo -n "local_${tag}\t0"  >> ${prefix}.mapstat
+    """
+}
diff --git a/modules/local/hicpro/get_restriction_fragments.nf b/modules/local/hicpro/get_restriction_fragments.nf
new file mode 100644
index 0000000000000000000000000000000000000000..56cd74a24a7d5102aad25cbe7032e31e2c4a1452
--- /dev/null
+++ b/modules/local/hicpro/get_restriction_fragments.nf
@@ -0,0 +1,27 @@
+process GET_RESTRICTION_FRAGMENTS {
+    tag "$res_site"
+    label 'process_low'
+
+    conda "conda-forge::python=3.9 conda-forge::numpy=1.22.3"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
+
+    input:
+    tuple val(meta), path(fasta)
+    val(res_site)
+
+    output:
+    tuple val(meta), path("*.bed"), emit: results
+    path("versions.yml"), emit: versions
+
+    script:
+    """
+    digest_genome.py -r ${res_site} -o restriction_fragments.bed ${fasta}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/get_valid_interaction.nf b/modules/local/hicpro/get_valid_interaction.nf
new file mode 100644
index 0000000000000000000000000000000000000000..bebd42197471c214ce0cde63c88ea89224387e76
--- /dev/null
+++ b/modules/local/hicpro/get_valid_interaction.nf
@@ -0,0 +1,37 @@
+process GET_VALID_INTERACTION {
+    tag "$meta.id"
+    label 'process_low'
+
+    conda "conda-forge::python=3.9  bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
+
+    input:
+    tuple val(meta), path(bam)
+    tuple val(meta2), path(resfrag)
+
+    output:
+    tuple val(meta), path("*.validPairs"), emit:valid_pairs
+    tuple val(meta), path("*.DEPairs"), optional:true, emit:de_pairs
+    tuple val(meta), path("*.SCPairs"), optional: true, emit:sc_pairs
+    tuple val(meta), path("*.REPairs"), optional: true, emit:re_pairs
+    tuple val(meta), path("*.FiltPairs"), optional: true, emit:filt_pairs
+    tuple val(meta), path("*RSstat"), optional: true, emit:stats
+    path("versions.yml"), emit: versions
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    mapped_2hic_fragments.py \\
+        -f ${resfrag} \\
+        -r ${bam} \\
+        --all \\
+        ${args}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/get_valid_interaction_dnase.nf b/modules/local/hicpro/get_valid_interaction_dnase.nf
new file mode 100644
index 0000000000000000000000000000000000000000..d62318f87feac914b8b07796792fa18e363eef7e
--- /dev/null
+++ b/modules/local/hicpro/get_valid_interaction_dnase.nf
@@ -0,0 +1,30 @@
+process GET_VALID_INTERACTION_DNASE {
+    tag "$meta.id"
+    label 'process_low'
+
+    conda "conda-forge::python=3.9 bioconda::pysam=0.19.0 bioconda::bx-python=0.8.13"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
+
+    input:
+    tuple val(meta), path(bam)
+
+    output:
+    tuple val(meta), path("*.validPairs"), emit:valid_pairs
+    tuple val(meta), path("*RSstat"), optional: true, emit:stats
+    path("versions.yml"), emit: versions
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    mapped_2hic_dnase.py \\
+        -r ${bam} \\
+        ${args}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/hicpro2pairs.nf b/modules/local/hicpro/hicpro2pairs.nf
new file mode 100644
index 0000000000000000000000000000000000000000..eb9b86b5e4ca0ace6549fe0c33a9dd1f63aaf5f3
--- /dev/null
+++ b/modules/local/hicpro/hicpro2pairs.nf
@@ -0,0 +1,31 @@
+process HICPRO2PAIRS {
+    tag "$meta.id"
+    label 'process_medium'
+
+    conda "bioconda::pairix=0.3.7"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/pairix:0.3.7--py36h30a8e3e_3' :
+        'biocontainers/pairix:0.3.7--py36h30a8e3e_3' }"
+
+    input:
+    tuple val(meta), path(vpairs)
+    tuple val(meta2), path(chrsize)
+
+    output:
+    tuple val(meta), path("*.pairs.gz"), path("*.pairs.gz.px2"), emit: pairs
+    path("versions.yml"), emit: versions
+
+    script:
+    prefix = "${meta.id}"
+    """
+    ##columns: readID chr1 pos1 chr2 pos2 strand1 strand2
+    awk '{OFS="\t";print \$1,\$2,\$3,\$5,\$6,\$4,\$7}' $vpairs | bgzip -c > ${prefix}_contacts.pairs.gz
+    ##sort -k2,2 -k4,4 -k3,3n -k5,5n ${prefix}_contacts.pairs | bgzip -c > ${prefix}_contacts.pairs.gz
+    pairix -f ${prefix}_contacts.pairs.gz
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        pairix: \$(echo \$(pairix 2>&1 | grep Version | sed -e 's/Version: //'))
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/merge_stats.nf b/modules/local/hicpro/merge_stats.nf
new file mode 100644
index 0000000000000000000000000000000000000000..d9c2aecb5b2eb5c7574e139f7ffbbf3c8d3289de
--- /dev/null
+++ b/modules/local/hicpro/merge_stats.nf
@@ -0,0 +1,32 @@
+process MERGE_STATS {
+    tag "${meta.id}"
+    label 'process_low'
+
+    conda "conda-forge::python=3.9"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0'}"
+
+    input:
+    tuple val(meta), path(fstat)
+
+    output:
+    path("${meta.id}/"), emit: mqc
+    path("*.{mmapstat,mpairstat,mRSstat}"), emit: stats
+    path("versions.yml"), emit:versions
+
+    script:
+    if ( (fstat =~ /\.mapstat/) ){ ext = "${meta.mates}.mmapstat" }
+    if ( (fstat =~ /\.pairstat/) ){ ext = "mpairstat" }
+    if ( (fstat =~ /\.RSstat/) ){ ext = "mRSstat" }
+    """
+    mkdir -p ${meta.id}
+    merge_statfiles.py -f ${fstat} > ${meta.id}.${ext}
+    cp *${ext} ${meta.id}/
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/merge_valid_interaction.nf b/modules/local/hicpro/merge_valid_interaction.nf
new file mode 100644
index 0000000000000000000000000000000000000000..2dcb63d176704a97fcb87fa3cf391f6ec9f5888d
--- /dev/null
+++ b/modules/local/hicpro/merge_valid_interaction.nf
@@ -0,0 +1,34 @@
+process MERGE_VALID_INTERACTION {
+    tag "$prefix"
+    label 'process_high_memory'
+
+    conda "conda-forge::gawk=5.1.0"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'nf-core/ubuntu:20.04' }"
+
+    input:
+    tuple val(meta), path(vpairs)
+
+    output:
+    tuple val(meta), path("*.allValidPairs"), emit: valid_pairs
+    path("${meta.id}/"), emit:mqc
+    path("*mergestat"), emit:stats
+    path("versions.yml"), emit: versions
+
+    script:
+    prefix = meta.id
+    def args = task.ext.args ?: ''
+    """
+    hicpro_merge_validpairs.sh ${args} -p ${prefix} ${vpairs}
+
+    ## For MultiQC
+    mkdir -p ${prefix}
+    cp ${prefix}_allValidPairs.mergestat ${prefix}/
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        sort: \$(echo \$(sort --version 2>&1 | head -1 | awk '{print \$NF}' 2>&1))
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/run_ice.nf b/modules/local/hicpro/run_ice.nf
new file mode 100644
index 0000000000000000000000000000000000000000..7ef5727378fdfabfb7b93e2852c11131481c0a11
--- /dev/null
+++ b/modules/local/hicpro/run_ice.nf
@@ -0,0 +1,32 @@
+process ICE_NORMALIZATION {
+    tag "$meta.id"
+    label 'process_high_memory'
+
+    conda "conda-forge::python=3.9 bioconda::iced=0.5.10 conda-forge::numpy=1.22.3"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' :
+        'biocontainers/mulled-v2-c6ff206325681cbb9c9ef890bb8de554172c0483:713df51cd897ceb893b9a6e6420f527d83c2ed95-0' }"
+
+    input:
+    tuple val(meta), val(res), path(rmaps), path(bed)
+
+    output:
+    tuple val(meta), val(res), path("*iced.matrix"), path(bed), emit:maps
+    path ("*.biases"), emit:bias
+    path("versions.yml"), emit: versions
+
+    script:
+    prefix = rmaps.toString() - ~/(\.matrix)?$/
+    """
+    ice --filter_low_counts_perc ${params.ice_filter_low_count_perc} \
+        --results_filename ${prefix}_iced.matrix \
+        --filter_high_counts_perc ${params.ice_filter_high_count_perc} \
+        --max_iter ${params.ice_max_iter} --eps ${params.ice_eps} --remove-all-zeros-loci --output-bias 1 --verbose 1 ${rmaps}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        python: \$(echo \$(python --version 2>&1) | sed 's/Python //')
+        iced: \$(python -c "import iced; print(iced.__version__)")
+    END_VERSIONS
+    """
+}
diff --git a/modules/local/hicpro/trim_reads.nf b/modules/local/hicpro/trim_reads.nf
new file mode 100644
index 0000000000000000000000000000000000000000..b28d4930a21f5af2b84b435ef3ba53575f31d7bc
--- /dev/null
+++ b/modules/local/hicpro/trim_reads.nf
@@ -0,0 +1,32 @@
+process TRIM_READS {
+    tag "$meta.id"
+    label 'process_low'
+
+    conda "conda-forge::sed=4.7"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'nf-core/ubuntu:20.04' }"
+
+    input:
+    tuple val(meta), path(reads)
+    val(motif)
+
+    output:
+    tuple val(meta), path("*trimmed.fastq.gz"), emit: fastq
+    path("versions.yml") , emit: versions
+
+    script:
+    """
+    zcat ${reads} > tmp.fastq
+    cutsite_trimming --fastq tmp.fastq \\
+        --cutsite ${motif[0]} \\
+        --out ${reads.simpleName}_trimmed.fastq
+    gzip ${reads.simpleName}_trimmed.fastq
+    /bin/rm -f tmp.fastq
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        gzip: \$(echo \$(gzip --version 2>&1) | head -1 | cut -d" " -f2)
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/multiqc/main.nf b/modules/local/multiqc.nf
similarity index 50%
rename from modules/nf-core/multiqc/main.nf
rename to modules/local/multiqc.nf
index 4b604749f5adef79787ae5802881109b9695e8fc..453513fbd586e81e0c069f8029339d37f87c75c3 100644
--- a/modules/nf-core/multiqc/main.nf
+++ b/modules/local/multiqc.nf
@@ -1,16 +1,17 @@
 process MULTIQC {
-    label 'process_single'
+    label 'process_medium'
 
     conda "bioconda::multiqc=1.14"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/multiqc:1.14--pyhdfd78af_0' :
-        'quay.io/biocontainers/multiqc:1.14--pyhdfd78af_0' }"
+        'biocontainers/multiqc:1.14--pyhdfd78af_0' }"
 
     input:
-    path  multiqc_files, stageAs: "?/*"
-    path(multiqc_config)
-    path(extra_multiqc_config)
-    path(multiqc_logo)
+    path multiqc_config
+    path (mqc_custom_config)
+    path workflow_summary
+    path ('fastqc/*')
+    path ('input_*/*')
 
     output:
     path "*multiqc_report.html", emit: report
@@ -23,27 +24,8 @@ process MULTIQC {
 
     script:
     def args = task.ext.args ?: ''
-    def config = multiqc_config ? "--config $multiqc_config" : ''
-    def extra_config = extra_multiqc_config ? "--config $extra_multiqc_config" : ''
     """
-    multiqc \\
-        --force \\
-        $args \\
-        $config \\
-        $extra_config \\
-        .
-
-    cat <<-END_VERSIONS > versions.yml
-    "${task.process}":
-        multiqc: \$( multiqc --version | sed -e "s/multiqc, version //g" )
-    END_VERSIONS
-    """
-
-    stub:
-    """
-    touch multiqc_data
-    touch multiqc_plots
-    touch multiqc_report.html
+    multiqc -f $args .
 
     cat <<-END_VERSIONS > versions.yml
     "${task.process}":
diff --git a/modules/local/split_cooler_dump.nf b/modules/local/split_cooler_dump.nf
new file mode 100644
index 0000000000000000000000000000000000000000..b2f9610fcf55558abd0d815a1e4c56e81b772d4f
--- /dev/null
+++ b/modules/local/split_cooler_dump.nf
@@ -0,0 +1,32 @@
+process SPLIT_COOLER_DUMP {
+    tag "$meta.id"
+    label 'process_low'
+
+    conda "conda-forge::gawk=5.1.0"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/ubuntu:20.04' :
+        'nf-core/ubuntu:20.04' }"
+
+    input:
+    tuple val(meta), path(bedpe)
+
+    output:
+    tuple val(meta), path("*.txt"), emit: matrix
+    path ("versions.yml"), emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    prefix = bedpe.toString() - ~/(_balanced)?\.bedpe$/
+    """
+    cat ${bedpe} | awk '{OFS="\t"; print \$1,\$2,\$3}' > ${prefix}_raw.txt
+    cat ${bedpe} | awk '{OFS="\t"; print \$1,\$2,\$4}' > ${prefix}_balanced.txt
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(awk --version | head -1 | cut -f1 -d, | sed -e 's/GNU Awk //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/bowtie2/align/main.nf b/modules/nf-core/bowtie2/align/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..311a1505eccdd3825db6ad8652305cb445148ae3
--- /dev/null
+++ b/modules/nf-core/bowtie2/align/main.nf
@@ -0,0 +1,71 @@
+process BOWTIE2_ALIGN {
+    tag "$meta.id"
+    label "process_high"
+
+    conda "bioconda::bowtie2=2.4.4 bioconda::samtools=1.16.1 conda-forge::pigz=2.6"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0' :
+        'biocontainers/mulled-v2-ac74a7f02cebcfcc07d8e8d1d750af9c83b4d45a:a0ffedb52808e102887f6ce600d092675bf3528a-0' }"
+
+    input:
+    tuple val(meta) , path(reads)
+    tuple val(meta2), path(index)
+    val   save_unaligned
+    val   sort_bam
+
+    output:
+    tuple val(meta), path("*.bam")    , emit: bam
+    tuple val(meta), path("*.log")    , emit: log
+    tuple val(meta), path("*fastq.gz"), emit: fastq, optional:true
+    path  "versions.yml"              , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ""
+    def args2 = task.ext.args2 ?: ""
+    def prefix = task.ext.prefix ?: "${meta.id}"
+
+    def unaligned = ""
+    def reads_args = ""
+    if (meta.single_end) {
+        unaligned = save_unaligned ? "--un-gz ${prefix}.unmapped.fastq.gz" : ""
+        reads_args = "-U ${reads}"
+    } else {
+        unaligned = save_unaligned ? "--un-conc-gz ${prefix}.unmapped.fastq.gz" : ""
+        reads_args = "-1 ${reads[0]} -2 ${reads[1]}"
+    }
+
+    def samtools_command = sort_bam ? 'sort' : 'view'
+
+    """
+    INDEX=`find -L ./ -name "*.rev.1.bt2" | sed "s/\\.rev.1.bt2\$//"`
+    [ -z "\$INDEX" ] && INDEX=`find -L ./ -name "*.rev.1.bt2l" | sed "s/\\.rev.1.bt2l\$//"`
+    [ -z "\$INDEX" ] && echo "Bowtie2 index files not found" 1>&2 && exit 1
+
+    bowtie2 \\
+        -x \$INDEX \\
+        $reads_args \\
+        --threads $task.cpus \\
+        $unaligned \\
+        $args \\
+        2> ${prefix}.bowtie2.log \\
+        | samtools $samtools_command $args2 --threads $task.cpus -o ${prefix}.bam -
+
+    if [ -f ${prefix}.unmapped.fastq.1.gz ]; then
+        mv ${prefix}.unmapped.fastq.1.gz ${prefix}.unmapped_1.fastq.gz
+    fi
+
+    if [ -f ${prefix}.unmapped.fastq.2.gz ]; then
+        mv ${prefix}.unmapped.fastq.2.gz ${prefix}.unmapped_2.fastq.gz
+    fi
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
+        samtools: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+        pigz: \$( pigz --version 2>&1 | sed 's/pigz //g' )
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/bowtie2/align/meta.yml b/modules/nf-core/bowtie2/align/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c8e9a001290d0feddaa121424a5a9c65ae568396
--- /dev/null
+++ b/modules/nf-core/bowtie2/align/meta.yml
@@ -0,0 +1,67 @@
+name: bowtie2_align
+description: Align reads to a reference genome using bowtie2
+keywords:
+  - align
+  - map
+  - fasta
+  - fastq
+  - genome
+  - reference
+tools:
+  - bowtie2:
+      description: |
+        Bowtie 2 is an ultrafast and memory-efficient tool for aligning
+        sequencing reads to long reference sequences.
+      homepage: http://bowtie-bio.sourceforge.net/bowtie2/index.shtml
+      documentation: http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml
+      doi: 10.1038/nmeth.1923
+      licence: ["GPL-3.0-or-later"]
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - reads:
+      type: file
+      description: |
+        List of input FastQ files of size 1 and 2 for single-end and paired-end data,
+        respectively.
+  - meta2:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
+  - index:
+      type: file
+      description: Bowtie2 genome index files
+      pattern: "*.bt2"
+  - save_unaligned:
+      type: boolean
+      description: |
+        Save reads that do not map to the reference (true) or discard them (false)
+        (default: false)
+  - sort_bam:
+      type: boolean
+      description: use samtools sort (true) or samtools view (false)
+      pattern: "true or false"
+output:
+  - bam:
+      type: file
+      description: Output BAM file containing read alignments
+      pattern: "*.{bam}"
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - fastq:
+      type: file
+      description: Unaligned FastQ files
+      pattern: "*.fastq.gz"
+  - log:
+      type: file
+      description: Aligment log
+      pattern: "*.log"
+authors:
+  - "@joseespinosa"
+  - "@drpatelh"
diff --git a/modules/nf-core/bowtie2/build/main.nf b/modules/nf-core/bowtie2/build/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..566a4accda99ed7c9c56564f6c3348bec439df11
--- /dev/null
+++ b/modules/nf-core/bowtie2/build/main.nf
@@ -0,0 +1,30 @@
+process BOWTIE2_BUILD {
+    tag "$fasta"
+    label 'process_high'
+
+    conda "bioconda::bowtie2=2.4.4"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/bowtie2:2.4.4--py39hbb4e92a_0' :
+        'biocontainers/bowtie2:2.4.4--py39hbb4e92a_0' }"
+
+    input:
+    tuple val(meta), path(fasta)
+
+    output:
+    tuple val(meta), path('bowtie2')    , emit: index
+    path "versions.yml"                 , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    mkdir bowtie2
+    bowtie2-build $args --threads $task.cpus $fasta bowtie2/${fasta.baseName}
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        bowtie2: \$(echo \$(bowtie2 --version 2>&1) | sed 's/^.*bowtie2-align-s version //; s/ .*\$//')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/bowtie2/build/meta.yml b/modules/nf-core/bowtie2/build/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0240224d532af842d762c5c947fff6e84e5be113
--- /dev/null
+++ b/modules/nf-core/bowtie2/build/meta.yml
@@ -0,0 +1,43 @@
+name: bowtie2_build
+description: Builds bowtie index for reference genome
+keywords:
+  - build
+  - index
+  - fasta
+  - genome
+  - reference
+tools:
+  - bowtie2:
+      description: |
+        Bowtie 2 is an ultrafast and memory-efficient tool for aligning
+        sequencing reads to long reference sequences.
+      homepage: http://bowtie-bio.sourceforge.net/bowtie2/index.shtml
+      documentation: http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml
+      doi: 10.1038/nmeth.1923
+      licence: ["GPL-3.0-or-later"]
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
+  - fasta:
+      type: file
+      description: Input genome fasta file
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing reference information
+        e.g. [ id:'test', single_end:false ]
+  - index:
+      type: file
+      description: Bowtie2 genome index files
+      pattern: "*.bt2"
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+authors:
+  - "@joseespinosa"
+  - "@drpatelh"
diff --git a/modules/nf-core/cooler/balance/main.nf b/modules/nf-core/cooler/balance/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..6e131d5b4717de6a6e7079b067154f7dd7e4e937
--- /dev/null
+++ b/modules/nf-core/cooler/balance/main.nf
@@ -0,0 +1,39 @@
+process COOLER_BALANCE {
+    tag "$meta.id"
+    label 'process_high'
+
+    conda "bioconda::cooler=0.8.11"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0':
+        'biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
+
+    input:
+    tuple val(meta), path(cool), val(resolution)
+
+    output:
+    tuple val(meta), path("${prefix}.${extension}"), emit: cool
+    path "versions.yml"                            , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    prefix = task.ext.prefix ?: "${meta.id}"
+    suffix = resolution ? "::/resolutions/$resolution" : ""
+    extension = cool.getExtension()
+    if ("$cool" == "${prefix}.${extension}") error "Input and output names are the same, use \"task.ext.prefix\" to disambiguate!"
+    """
+    cp ${cool} ${prefix}.${extension}
+
+    cooler balance \\
+        $args \\
+        -p ${task.cpus} \\
+        ${prefix}.${extension}${suffix}
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(cooler --version 2>&1 | sed 's/cooler, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/cooler/balance/meta.yml b/modules/nf-core/cooler/balance/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..af1a780002701ad6bbf7d64105e5e8153098f5ec
--- /dev/null
+++ b/modules/nf-core/cooler/balance/meta.yml
@@ -0,0 +1,45 @@
+name: "cooler_balance"
+description: Run matrix balancing on a cool file
+keywords:
+  - balance
+tools:
+  - "cooler":
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - cool:
+      type: file
+      description: Path to COOL file
+      pattern: "*.{cool,mcool}"
+  - resolution:
+      type: value
+      description: Resolution
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - cool:
+      type: file
+      description: Output COOL file balancing weigths
+      pattern: "*.cool"
+
+authors:
+  - "@nservant"
+  - "@muffato"
diff --git a/modules/nf-core/cooler/cload/main.nf b/modules/nf-core/cooler/cload/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..80109d48f449af278016bbda6cd08c46bbf8e183
--- /dev/null
+++ b/modules/nf-core/cooler/cload/main.nf
@@ -0,0 +1,39 @@
+process COOLER_CLOAD {
+    tag "$meta.id"
+    label 'process_high'
+
+    conda "bioconda::cooler=0.8.11"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
+        'biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
+
+    input:
+    tuple val(meta), path(pairs), path(index), val(cool_bin)
+    path chromsizes
+
+    output:
+    tuple val(meta), path("*.cool"), val(cool_bin), emit: cool
+    path "versions.yml"                           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    def nproc  = args.contains('pairix') || args.contains('tabix')? "--nproc $task.cpus" : ''
+
+    """
+    cooler cload \\
+        $args \\
+        $nproc \\
+        ${chromsizes}:${cool_bin} \\
+        $pairs \\
+        ${prefix}.cool
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(cooler --version 2>&1 | sed 's/cooler, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/cooler/cload/meta.yml b/modules/nf-core/cooler/cload/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8513aaec1ed8558f850367f453ee55eb9d378b11
--- /dev/null
+++ b/modules/nf-core/cooler/cload/meta.yml
@@ -0,0 +1,53 @@
+name: cooler_cload
+description: Create a cooler from genomic pairs and bins
+keywords:
+  - cool
+tools:
+  - cooler:
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - pairs:
+      type: file
+      description: Path to contacts (i.e. read pairs) file.
+  - index:
+      type: file
+      description: Path to index file of the contacts.
+  - cool_bin:
+      type: value
+      description: Bins size in bp
+  - chromsizes:
+      type: file
+      description: Path to a chromsizes file.
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - version:
+      type: file
+      description: File containing software version
+      pattern: "versions.yml"
+  - cool:
+      type: file
+      description: Output COOL file path
+      pattern: "*.cool"
+  - cool_bin:
+      type: value
+      description: Bins size in bp
+
+authors:
+  - "@jianhong"
+  - "@muffato"
diff --git a/modules/nf-core/cooler/dump/main.nf b/modules/nf-core/cooler/dump/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..fed7502f8cb6a788b056c87aab3a2e71d26dd400
--- /dev/null
+++ b/modules/nf-core/cooler/dump/main.nf
@@ -0,0 +1,35 @@
+process COOLER_DUMP {
+    tag "$meta.id"
+    label 'process_high'
+
+    conda "bioconda::cooler=0.8.11"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
+        'biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
+
+    input:
+    tuple val(meta), path(cool), val(resolution)
+
+    output:
+    tuple val(meta), path("*.bedpe"), emit: bedpe
+    path "versions.yml"             , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    def suffix = resolution ? "::/resolutions/$resolution" : ""
+    """
+    cooler dump \\
+        $args \\
+        -o ${prefix}.bedpe \\
+        $cool$suffix
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(cooler --version 2>&1 | sed 's/cooler, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/cooler/dump/meta.yml b/modules/nf-core/cooler/dump/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..fe60523eb3556a6bb2187be0563c3fd6f2cbf5cf
--- /dev/null
+++ b/modules/nf-core/cooler/dump/meta.yml
@@ -0,0 +1,45 @@
+name: cooler_dump
+description: Dump a cooler’s data to a text stream.
+keywords:
+  - dump
+tools:
+  - cooler:
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - cool:
+      type: file
+      description: Path to COOL file
+      pattern: "*.{cool,mcool}"
+  - resolution:
+      type: value
+      description: Resolution
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - bedpe:
+      type: file
+      description: Output text file
+      pattern: "*.bedpe"
+
+authors:
+  - "@jianhong"
+  - "@muffato"
diff --git a/modules/nf-core/cooler/makebins/main.nf b/modules/nf-core/cooler/makebins/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..25d6a40f636257c90251893c74eb2769200b41c1
--- /dev/null
+++ b/modules/nf-core/cooler/makebins/main.nf
@@ -0,0 +1,34 @@
+process COOLER_MAKEBINS {
+    tag "${meta.id}"
+    label 'process_low'
+
+    conda "bioconda::cooler=0.8.11"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0':
+        'biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
+
+    input:
+    tuple val(meta), path(chromsizes), val(cool_bin)
+
+    output:
+    tuple val(meta), path("*.bed"), emit: bed
+    path "versions.yml"           , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args   = task.ext.args   ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    cooler makebins \\
+        $args \\
+        ${chromsizes} \\
+        ${cool_bin} > ${prefix}.bed
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(cooler --version 2>&1 | sed 's/cooler, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/cooler/makebins/meta.yml b/modules/nf-core/cooler/makebins/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..33fd8eb63f4a3225d3d03eb028578b2c1eeeaa5d
--- /dev/null
+++ b/modules/nf-core/cooler/makebins/meta.yml
@@ -0,0 +1,34 @@
+name: "cooler_makebins"
+description: Generate fixed-width genomic bins
+keywords:
+  - makebins
+tools:
+  - "cooler":
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - chromsize:
+      type: file
+      description: Path to chromosome size file
+  - cool_bin:
+      type: value
+      description: Resolution (bin size) in base pairs
+
+output:
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - bed:
+      type: file
+      description: Genome segmentation at a fixed resolution as a BED file.
+      pattern: "*.bed"
+
+authors:
+  - "@nservant"
+  - "@muffato"
diff --git a/modules/nf-core/cooler/zoomify/main.nf b/modules/nf-core/cooler/zoomify/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..95e7daff0d19429507d01033eb78eba491625220
--- /dev/null
+++ b/modules/nf-core/cooler/zoomify/main.nf
@@ -0,0 +1,35 @@
+process COOLER_ZOOMIFY {
+    tag "$meta.id"
+    label 'process_high'
+
+    conda "bioconda::cooler=0.8.11"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/cooler:0.8.11--pyh3252c3a_0' :
+        'biocontainers/cooler:0.8.11--pyh3252c3a_0' }"
+
+    input:
+    tuple val(meta), path(cool)
+
+    output:
+    tuple val(meta), path("*.mcool"), emit: mcool
+    path "versions.yml"             , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    def prefix = task.ext.prefix ?: "${meta.id}"
+    """
+    cooler zoomify \\
+        $args \\
+        -n $task.cpus \\
+        -o ${prefix}.mcool \\
+        $cool
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        cooler: \$(cooler --version 2>&1 | sed 's/cooler, version //')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/cooler/zoomify/meta.yml b/modules/nf-core/cooler/zoomify/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..57f554861b25d4ca9edac66c73ff306a4d9f9390
--- /dev/null
+++ b/modules/nf-core/cooler/zoomify/meta.yml
@@ -0,0 +1,41 @@
+name: cooler_zoomify
+description: Generate a multi-resolution cooler file by coarsening
+keywords:
+  - mcool
+tools:
+  - cooler:
+      description: Sparse binary format for genomic interaction matrices
+      homepage: https://open2c.github.io/cooler/
+      documentation: https://cooler.readthedocs.io/en/latest/index.html
+      tool_dev_url: https://github.com/open2c/cooler
+      doi: "10.1093/bioinformatics/btz540"
+      licence: ["BSD-3-Clause"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - cool:
+      type: file
+      description: Path to COOL file
+      pattern: "*.{cool,mcool}"
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+  - mcool:
+      type: file
+      description: Output mcool file
+      pattern: "*.mcool"
+
+authors:
+  - "@jianhong"
diff --git a/modules/nf-core/custom/dumpsoftwareversions/main.nf b/modules/nf-core/custom/dumpsoftwareversions/main.nf
index 800a60991a3fc2da7acf5211b7d2d05a3b3fbdc2..ebc8727339b0ee4ce7e2feb981a0aaf416723b57 100644
--- a/modules/nf-core/custom/dumpsoftwareversions/main.nf
+++ b/modules/nf-core/custom/dumpsoftwareversions/main.nf
@@ -5,7 +5,7 @@ process CUSTOM_DUMPSOFTWAREVERSIONS {
     conda "bioconda::multiqc=1.14"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/multiqc:1.14--pyhdfd78af_0' :
-        'quay.io/biocontainers/multiqc:1.14--pyhdfd78af_0' }"
+        'biocontainers/multiqc:1.14--pyhdfd78af_0' }"
 
     input:
     path versions
diff --git a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
index e55b8d43a918be8875e1de2066deea54153d086a..da03340857c4c90957c79c9f892030bc1bb397a3 100755
--- a/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
+++ b/modules/nf-core/custom/dumpsoftwareversions/templates/dumpsoftwareversions.py
@@ -4,11 +4,10 @@
 """Provide functions to merge multiple versions.yml files."""
 
 
+import yaml
 import platform
 from textwrap import dedent
 
-import yaml
-
 
 def _make_versions_html(versions):
     """Generate a tabular HTML output of all versions for MultiQC."""
diff --git a/modules/nf-core/custom/getchromsizes/main.nf b/modules/nf-core/custom/getchromsizes/main.nf
new file mode 100644
index 0000000000000000000000000000000000000000..060a2e88527af75fabb1d87858c2885b6ca09547
--- /dev/null
+++ b/modules/nf-core/custom/getchromsizes/main.nf
@@ -0,0 +1,44 @@
+process CUSTOM_GETCHROMSIZES {
+    tag "$fasta"
+    label 'process_single'
+
+    conda "bioconda::samtools=1.16.1"
+    container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
+        'https://depot.galaxyproject.org/singularity/samtools:1.16.1--h6899075_1' :
+        'biocontainers/samtools:1.16.1--h6899075_1' }"
+
+    input:
+    tuple val(meta), path(fasta)
+
+    output:
+    tuple val(meta), path ("*.sizes"), emit: sizes
+    tuple val(meta), path ("*.fai")  , emit: fai
+    tuple val(meta), path ("*.gzi")  , emit: gzi, optional: true
+    path  "versions.yml"             , emit: versions
+
+    when:
+    task.ext.when == null || task.ext.when
+
+    script:
+    def args = task.ext.args ?: ''
+    """
+    samtools faidx $fasta
+    cut -f 1,2 ${fasta}.fai > ${fasta}.sizes
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        getchromsizes: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+    END_VERSIONS
+    """
+
+    stub:
+    """
+    touch ${fasta}.fai
+    touch ${fasta}.sizes
+
+    cat <<-END_VERSIONS > versions.yml
+    "${task.process}":
+        getchromsizes: \$(echo \$(samtools --version 2>&1) | sed 's/^.*samtools //; s/Using.*\$//')
+    END_VERSIONS
+    """
+}
diff --git a/modules/nf-core/custom/getchromsizes/meta.yml b/modules/nf-core/custom/getchromsizes/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..219ca1d8e07166c23e28089ae5067b5937cc6f8d
--- /dev/null
+++ b/modules/nf-core/custom/getchromsizes/meta.yml
@@ -0,0 +1,53 @@
+name: custom_getchromsizes
+description: Generates a file of chromosome sizes and a FASTA index file
+keywords:
+  - fasta
+  - chromosome
+  - indexing
+tools:
+  - samtools:
+      description: Tools for dealing with SAM, BAM and CRAM files
+      homepage: http://www.htslib.org/
+      documentation: http://www.htslib.org/doc/samtools.html
+      tool_dev_url: https://github.com/samtools/samtools
+      doi: 10.1093/bioinformatics/btp352
+      licence: ["MIT"]
+
+input:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - fasta:
+      type: file
+      description: FASTA file
+      pattern: "*.{fa,fasta,fna,fas}"
+
+output:
+  - meta:
+      type: map
+      description: |
+        Groovy Map containing sample information
+        e.g. [ id:'test', single_end:false ]
+  - sizes:
+      type: file
+      description: File containing chromosome lengths
+      pattern: "*.{sizes}"
+  - fai:
+      type: file
+      description: FASTA index file
+      pattern: "*.{fai}"
+  - gzi:
+      type: file
+      description: Optional gzip index file for compressed inputs
+      pattern: "*.gzi"
+  - versions:
+      type: file
+      description: File containing software versions
+      pattern: "versions.yml"
+
+authors:
+  - "@tamara-hodgetts"
+  - "@chris-cheshire"
+  - "@muffato"
diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf
index 9ae5838158b28d2ae49270133fbbfe0ea673e991..07d5e433128b416714860fe1dca659219ba1d1ad 100644
--- a/modules/nf-core/fastqc/main.nf
+++ b/modules/nf-core/fastqc/main.nf
@@ -5,7 +5,7 @@ process FASTQC {
     conda "bioconda::fastqc=0.11.9"
     container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ?
         'https://depot.galaxyproject.org/singularity/fastqc:0.11.9--0' :
-        'quay.io/biocontainers/fastqc:0.11.9--0' }"
+        'biocontainers/fastqc:0.11.9--0' }"
 
     input:
     tuple val(meta), path(reads)
diff --git a/modules/nf-core/multiqc/meta.yml b/modules/nf-core/multiqc/meta.yml
deleted file mode 100644
index f93b5ee51907e991cb10045f5355a4552a27db40..0000000000000000000000000000000000000000
--- a/modules/nf-core/multiqc/meta.yml
+++ /dev/null
@@ -1,56 +0,0 @@
-# yaml-language-server: $schema=https://raw.githubusercontent.com/nf-core/modules/master/modules/yaml-schema.json
-name: MultiQC
-description: Aggregate results from bioinformatics analyses across many samples into a single report
-keywords:
-  - QC
-  - bioinformatics tools
-  - Beautiful stand-alone HTML report
-tools:
-  - multiqc:
-      description: |
-        MultiQC searches a given directory for analysis logs and compiles a HTML report.
-        It's a general use tool, perfect for summarising the output from numerous bioinformatics tools.
-      homepage: https://multiqc.info/
-      documentation: https://multiqc.info/docs/
-      licence: ["GPL-3.0-or-later"]
-
-input:
-  - multiqc_files:
-      type: file
-      description: |
-        List of reports / files recognised by MultiQC, for example the html and zip output of FastQC
-  - multiqc_config:
-      type: file
-      description: Optional config yml for MultiQC
-      pattern: "*.{yml,yaml}"
-  - extra_multiqc_config:
-      type: file
-      description: Second optional config yml for MultiQC. Will override common sections in multiqc_config.
-      pattern: "*.{yml,yaml}"
-  - multiqc_logo:
-      type: file
-      description: Optional logo file for MultiQC
-      pattern: "*.{png}"
-
-output:
-  - report:
-      type: file
-      description: MultiQC report file
-      pattern: "multiqc_report.html"
-  - data:
-      type: directory
-      description: MultiQC data dir
-      pattern: "multiqc_data"
-  - plots:
-      type: file
-      description: Plots created by MultiQC
-      pattern: "*_data"
-  - versions:
-      type: file
-      description: File containing software versions
-      pattern: "versions.yml"
-authors:
-  - "@abhi18av"
-  - "@bunop"
-  - "@drpatelh"
-  - "@jfy133"
diff --git a/nextflow.config b/nextflow.config
index b1f98b3463e5516a12e44e2dca75c58b0058e4dc..7d3ee4d21a4d86722dbbe170c9deeca56d7b7328 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -9,15 +9,87 @@
 // Global default params, used in configs
 params {
 
-    // TODO nf-core: Specify your pipeline's command line flags
     // Input options
-    input                      = null
+    input = null
 
 
     // References
-    genome                     = null
-    igenomes_base              = 's3://ngi-igenomes/igenomes'
-    igenomes_ignore            = false
+    genome = null
+    igenomes_base = 's3://ngi-igenomes/igenomes'
+    igenomes_ignore = false
+    chromosome_size = null
+    restriction_fragments = null
+    save_reference = false
+
+    // Mapping
+    split_fastq = false
+    fastq_chunks_size = 20000000
+    save_interaction_bam = false
+    save_aligned_intermediates = false
+    bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+    bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+    keep_dups = false
+    keep_multi = false
+    min_mapq = 10
+
+    // Digestion Hi-C
+    digestion = null
+    ligation_site = null
+    restriction_site = null
+    digest {
+      'hindiii'{
+         restriction_site='A^AGCTT'
+         ligation_site='AAGCTAGCTT'
+      }
+      'mboi' {
+         restriction_site='^GATC'
+         ligation_site='GATCGATC'
+      }
+      'dpnii' {
+         restriction_site='^GATC'
+         ligation_site='GATCGATC'
+      }
+      'arima' {
+         restriction_site='^GATC,G^ANTC'
+         ligation_site='GATCGATC,GATCANTC,GANTGATC,GANTANTC'
+      }
+    }
+
+    min_restriction_fragment_size = 0
+    max_restriction_fragment_size = 0
+    min_insert_size = 0
+    max_insert_size = 0
+    save_pairs_intermediates = false
+
+    // Dnase Hi-C
+    dnase = false
+    min_cis_dist = 0
+
+    // Contact maps
+    save_raw_maps = false
+    bin_size = '1000000'
+    res_zoomify = null
+    hicpro_maps = false
+    ice_max_iter = 100
+    ice_filter_low_count_perc = 0.02
+    ice_filter_high_count_perc =  0
+    ice_eps = 0.1
+
+    // Downstream Analysis
+    res_dist_decay = '250000'
+    tads_caller = 'insulation'
+    res_tads = '40000'
+    res_compartments = '250000'
+
+    // Workflow
+    skip_maps = false
+    skip_balancing = false
+    skip_mcool = false
+    skip_dist_decay = false
+    skip_compartments = false
+    skip_tads = false
+    skip_multiqc = false
+
     // MultiQC options
     multiqc_config             = null
     multiqc_title              = null
@@ -26,7 +98,7 @@ params {
     multiqc_methods_description = null
 
     // Boilerplate options
-    outdir                     = null
+    outdir                     = './results'
     tracedir                   = "${params.outdir}/pipeline_info"
     publish_dir_mode           = 'copy'
     email                      = null
@@ -38,8 +110,7 @@ params {
     version                    = false
     validate_params            = true
     show_hidden_params         = false
-    schema_ignore_params       = 'genomes'
-
+    schema_ignore_params       = 'genomes,digest'
 
     // Config options
     custom_config_version      = 'master'
@@ -61,6 +132,9 @@ params {
 // Load base.config by default for all pipelines
 includeConfig 'conf/base.config'
 
+// Load modules.config for DSL2 module specific options
+includeConfig 'conf/modules.config'
+
 // Load nf-core custom profiles from different Institutions
 try {
     includeConfig "${params.custom_config_base}/nfcore_custom.config"
@@ -168,6 +242,9 @@ profiles {
         executor.cpus          = 16
         executor.memory        = 60.GB
     }
+    public_aws_ecr {
+        includeConfig 'conf/public_aws_ecr.config'
+    }
     test      { includeConfig 'conf/test.config'      }
     test_full { includeConfig 'conf/test_full.config' }
 }
@@ -195,6 +272,12 @@ env {
 // Capture exit codes from upstream processes when piping
 process.shell = ['/bin/bash', '-euo', 'pipefail']
 
+// Set default registry for Docker and Podman independent of -profile
+// Will not be used unless Docker / Podman are enabled
+// Set to your registry if you have a mirror of containers
+docker.registry = 'quay.io'
+podman.registry = 'quay.io'
+
 def trace_timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss')
 timeline {
     enabled = true
diff --git a/nextflow_schema.json b/nextflow_schema.json
index f908822bbce06ea8321c733ad042262d6a6775f2..a0c433072b777fefb3f981f266180cd3c5e8ac3e 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -58,7 +58,7 @@
                     "type": "string",
                     "format": "file-path",
                     "mimetype": "text/plain",
-                    "pattern": "^\\S+\\.fn?a(sta)?(\\.gz)?$",
+                    "pattern": "^\\S+\\.fn?s?a(sta)?(\\.gz)?$",
                     "description": "Path to FASTA genome file.",
                     "help_text": "This parameter is *mandatory* if `--genome` is not specified. If you don't have a BWA index available this will be generated for you automatically. Combine with `--save_reference` to save BWA index for future runs.",
                     "fa_icon": "far fa-file-code"
@@ -77,6 +77,264 @@
                     "fa_icon": "fas fa-ban",
                     "hidden": true,
                     "help_text": "Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`."
+                },
+                "bwt2_index": {
+                    "type": "string",
+                    "description": "Full path to directory containing Bowtie index including base name. i.e. `/path/to/index/base`.",
+                    "fa_icon": "far fa-file-alt"
+                }
+            }
+        },
+        "digestion_hi_c": {
+            "title": "Digestion Hi-C",
+            "type": "object",
+            "description": "Parameters for protocols based on restriction enzyme",
+            "default": "",
+            "properties": {
+                "digestion": {
+                    "type": "string",
+                    "description": "Name of restriction enzyme to automatically set the restriction_site and ligation_site options (hindiii, mboi, dpnii, arima)",
+                    "enum": ["hindiii", "mboi", "dpnii", "arima"]
+                },
+                "restriction_site": {
+                    "type": "string",
+                    "default": null,
+                    "description": "Restriction motifs used during digestion. Several motifs (comma separated) can be provided."
+                },
+                "ligation_site": {
+                    "type": "string",
+                    "default": null,
+                    "description": "Expected motif after DNA ligation.  Several motifs (comma separated) can be provided."
+                },
+                "chromosome_size": {
+                    "type": "string",
+                    "format": "file-path",
+                    "description": "Full path to file specifying chromosome sizes (tab separated with chromosome name and size)`.",
+                    "fa_icon": "far fa-file-alt",
+                    "help_text": "If not specified, the pipeline will build this file from the reference genome file"
+                },
+                "restriction_fragments": {
+                    "type": "string",
+                    "format": "file-path",
+                    "description": "Full path to restriction fragment (bed) file.",
+                    "fa_icon": "far fa-file-alt",
+                    "help_text": "This file depends on the Hi-C protocols and digestion strategy. If not provided, the pipeline will build it using the --restriction_site option"
+                },
+                "save_reference": {
+                    "type": "boolean",
+                    "description": "If generated by the pipeline save the annotation and indexes in the results directory.",
+                    "help_text": "Use this parameter to save all annotations to your results folder. These can then be used for future pipeline runs, reducing processing times.",
+                    "fa_icon": "fas fa-save"
+                }
+            }
+        },
+        "dnase_hi_c": {
+            "title": "DNAse Hi-C",
+            "type": "object",
+            "description": "Parameters for protocols based on DNAse digestion",
+            "default": "",
+            "properties": {
+                "dnase": {
+                    "type": "boolean",
+                    "description": "For Hi-C protocols which are not based on enzyme digestion such as DNase Hi-C"
+                },
+                "min_cis_dist": {
+                    "type": "integer",
+                    "description": "Minimum distance between loci to consider. Useful for --dnase mode to remove spurious ligation products. Only values > 0 are considered"
+                }
+            }
+        },
+        "alignments": {
+            "title": "Alignments",
+            "type": "object",
+            "description": "Parameters for read alignments",
+            "default": "",
+            "fa_icon": "fas fa-bahai",
+            "properties": {
+                "split_fastq": {
+                    "type": "boolean",
+                    "description": "Split the reads into chunks before running the pipeline",
+                    "fa_icon": "fas fa-dna"
+                },
+                "fastq_chunks_size": {
+                    "type": "integer",
+                    "description": "Read number per chunks if split_fastq is used",
+                    "default": 20000000
+                },
+                "min_mapq": {
+                    "type": "integer",
+                    "default": 10,
+                    "description": "Keep aligned reads with a minimum quality value"
+                },
+                "bwt2_opts_end2end": {
+                    "type": "string",
+                    "default": "'--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
+                    "description": "Option for HiC-Pro end-to-end bowtie mapping"
+                },
+                "bwt2_opts_trimmed": {
+                    "type": "string",
+                    "default": "'--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
+                    "description": "Option for HiC-Pro trimmed reads mapping"
+                },
+                "save_aligned_intermediates": {
+                    "type": "boolean",
+                    "description": "Save all BAM files during two-steps mapping"
+                }
+            }
+        },
+        "valid_pairs_detection": {
+            "title": "Valid Pairs Detection",
+            "type": "object",
+            "description": "Options to call significant interactions",
+            "default": "",
+            "fa_icon": "fas fa-signature",
+            "properties": {
+                "keep_dups": {
+                    "type": "boolean",
+                    "description": "Keep duplicated reads"
+                },
+                "keep_multi": {
+                    "type": "boolean",
+                    "description": "Keep multi-aligned reads"
+                },
+                "max_insert_size": {
+                    "type": "integer",
+                    "description": "Maximum fragment size to consider. Only values > 0 are considered"
+                },
+                "min_insert_size": {
+                    "type": "integer",
+                    "description": "Minimum fragment size to consider. Only values > 0 are considered"
+                },
+                "max_restriction_fragment_size": {
+                    "type": "integer",
+                    "description": "Maximum restriction fragment size to consider. Only values > 0 are considered"
+                },
+                "min_restriction_fragment_size": {
+                    "type": "integer",
+                    "description": "Minimum restriction fragment size to consider. Only values > 0 are considered"
+                },
+                "save_interaction_bam": {
+                    "type": "boolean",
+                    "description": "Save a BAM file where all reads are flagged by their interaction classes"
+                },
+                "save_pairs_intermediates": {
+                    "type": "boolean",
+                    "description": "Save all types of non valid read pairs in distinct output files"
+                }
+            }
+        },
+        "contact_maps": {
+            "title": "Contact maps",
+            "type": "object",
+            "description": "Options to build Hi-C contact maps",
+            "default": "",
+            "fa_icon": "fas fa-chess-board",
+            "properties": {
+                "bin_size": {
+                    "type": "string",
+                    "pattern": "^(\\d+)(,\\d+)*$",
+                    "default": "1000000,500000",
+                    "description": "Resolution to build the maps (comma separated)"
+                },
+                "hicpro_maps": {
+                    "type": "boolean",
+                    "description": "Generate raw and normalized contact maps with HiC-Pro"
+                },
+                "ice_filter_low_count_perc": {
+                    "type": "number",
+                    "default": 0.02,
+                    "description": "Filter low counts rows before HiC-Pro normalization"
+                },
+                "ice_filter_high_count_perc": {
+                    "type": "integer",
+                    "description": "Filter high counts rows before HiC-Pro normalization"
+                },
+                "ice_eps": {
+                    "type": "number",
+                    "default": 0.1,
+                    "description": "Threshold for HiC-Pro ICE convergence"
+                },
+                "ice_max_iter": {
+                    "type": "integer",
+                    "default": 100,
+                    "description": "Maximum number of iterations for HiC-Pro ICE normalization"
+                },
+                "res_zoomify": {
+                    "type": "string",
+                    "default": "5000",
+                    "description": "Maximum resolution to build mcool file"
+                },
+                "save_raw_maps": {
+                    "type": "boolean",
+                    "description": "Save raw contact maps"
+                }
+            }
+        },
+        "downstream_analysis": {
+            "title": "Downstream Analysis",
+            "type": "object",
+            "description": "Set up downstream analysis from contact maps",
+            "default": "",
+            "properties": {
+                "res_dist_decay": {
+                    "type": "string",
+                    "pattern": "^(\\d+)(,\\d+)*$",
+                    "default": "1000000",
+                    "description": "Resolution to build count/distance plot"
+                },
+                "tads_caller": {
+                    "type": "string",
+                    "default": "hicexplorer,insulation",
+                    "description": "Define methods for TADs calling"
+                },
+                "res_tads": {
+                    "type": "string",
+                    "pattern": "^(\\d+)(,\\d+)*$",
+                    "default": "40000,20000",
+                    "description": "Resolution to run TADs callers (comma separated)"
+                },
+                "res_compartments": {
+                    "type": "string",
+                    "pattern": "^(\\d+)(,\\d+)*$",
+                    "default": "250000",
+                    "description": "Resolution for compartments calling"
+                }
+            }
+        },
+        "skip_options": {
+            "title": "Skip options",
+            "type": "object",
+            "description": "Skip some steps of the pipeline",
+            "default": "",
+            "fa_icon": "fas fa-random",
+            "properties": {
+                "skip_maps": {
+                    "type": "boolean",
+                    "description": "Do not build contact maps"
+                },
+                "skip_dist_decay": {
+                    "type": "boolean",
+                    "description": "Do not run distance/decay plot"
+                },
+                "skip_tads": {
+                    "type": "boolean",
+                    "description": "Do not run TADs calling"
+                },
+                "skip_compartments": {
+                    "type": "boolean",
+                    "description": "Do not run compartments calling"
+                },
+                "skip_balancing": {
+                    "type": "boolean",
+                    "description": "Do not run cooler balancing normalization"
+                },
+                "skip_mcool": {
+                    "type": "boolean",
+                    "description": "Do not generate mcool file for Higlass visualization"
+                },
+                "skip_multiqc": {
+                    "type": "boolean",
+                    "description": "Do not generate MultiQC report"
                 }
             }
         },
@@ -104,9 +362,8 @@
                 },
                 "config_profile_name": {
                     "type": "string",
-                    "description": "Institutional config name.",
-                    "hidden": true,
-                    "fa_icon": "fas fa-users-cog"
+                    "description": "Institutional config name",
+                    "hidden": true
                 },
                 "config_profile_description": {
                     "type": "string",
@@ -274,6 +531,30 @@
         {
             "$ref": "#/definitions/reference_genome_options"
         },
+        {
+            "$ref": "#/definitions/digestion_hi_c"
+        },
+        {
+            "$ref": "#/definitions/dnase_hi_c"
+        },
+        {
+            "$ref": "#/definitions/alignments"
+        },
+        {
+            "$ref": "#/definitions/valid_pairs_detection"
+        },
+        {
+            "$ref": "#/definitions/contact_maps"
+        },
+        {
+            "$ref": "#/definitions/downstream_analysis"
+        },
+        {
+            "$ref": "#/definitions/skip_options"
+        },
+        {
+            "$ref": "#/definitions/generic_options"
+        },
         {
             "$ref": "#/definitions/institutional_config_options"
         },
diff --git a/subworkflows/local/compartments.nf b/subworkflows/local/compartments.nf
new file mode 100644
index 0000000000000000000000000000000000000000..fee68a5d5d8c3a061d02a6f698aff80186b8d7c3
--- /dev/null
+++ b/subworkflows/local/compartments.nf
@@ -0,0 +1,23 @@
+include { COOLTOOLS_EIGSCIS } from '../../modules/local/cooltools/eigscis'
+
+workflow COMPARTMENTS {
+
+  take:
+  cool
+  fasta
+  chrsize
+
+  main:
+  ch_versions = Channel.empty()
+
+  COOLTOOLS_EIGSCIS(
+    cool,
+    fasta.map{it -> it[1]}.collect(),
+    chrsize.map{it -> it[1]}.collect()
+  )
+  ch_versions = ch_versions.mix(COOLTOOLS_EIGSCIS.out.versions)
+
+  emit:
+  versions = ch_versions
+  compartments = COOLTOOLS_EIGSCIS.out.results
+}
\ No newline at end of file
diff --git a/subworkflows/local/cooler.nf b/subworkflows/local/cooler.nf
new file mode 100644
index 0000000000000000000000000000000000000000..1299266d61c0e8ee5a7e7b429e8ef205df7ebd8a
--- /dev/null
+++ b/subworkflows/local/cooler.nf
@@ -0,0 +1,97 @@
+/*
+ * COOLER MAIN WORKFLOW
+ * INPUT : .pair text file with the list of valid interactions
+ * OUTPUT : cooler files
+ */
+
+include { COOLER_ZOOMIFY } from '../../modules/nf-core/cooler/zoomify/main'
+include { COOLER_DUMP } from '../../modules/nf-core/cooler/dump/main' 
+include { COOLER_CLOAD } from '../../modules/nf-core/cooler/cload/main' 
+include { COOLER_BALANCE } from '../../modules/nf-core/cooler/balance/main'
+include { COOLER_MAKEBINS } from '../../modules/nf-core/cooler/makebins/main'
+
+include { SPLIT_COOLER_DUMP } from '../../modules/local/split_cooler_dump'
+
+// add resolution in meta
+def addResolution(row) {
+  def meta = [:]
+  meta.id = row[0].id
+  meta.resolution = row[2]
+  return [meta, row[1], row[2]]
+}
+
+workflow COOLER {
+
+  take:
+  pairs // [meta, pairs, index]
+  chromsize // [meta, chromsize]
+  cool_bins
+
+  main:
+  ch_versions = Channel.empty()
+
+  //*****************************************
+  // EXPORT BINS
+
+  COOLER_MAKEBINS(
+    chromsize.combine(cool_bins)
+  )
+  ch_versions = ch_versions.mix(COOLER_MAKEBINS.out.versions)
+
+  //*****************************************
+  // BUILD COOL FILE PER RESOLUTION
+  // [meta, pairs, resolution]
+
+  COOLER_CLOAD(
+    pairs.combine(cool_bins),
+    chromsize.map{it -> it[1]}.collect()
+  )
+  ch_versions = ch_versions.mix(COOLER_CLOAD.out.versions)
+
+  // Add resolution in meta
+  COOLER_CLOAD.out.cool
+    .map{ it -> addResolution(it) }
+    .set{ ch_cool }
+
+  COOLER_BALANCE(
+    ch_cool.map{[it[0], it[1], ""]}
+  )
+  ch_versions = ch_versions.mix(COOLER_BALANCE.out.versions)
+
+  // Zoomify at minimum bin resolution
+  if (!params.res_zoomify){
+    ch_res_zoomify = cool_bins.min()
+  }else{
+    ch_res_zoomify = Channel.from(params.res_zoomify).splitCsv().flatten().unique().toInteger()
+  }
+
+  ch_cool
+    .combine(ch_res_zoomify)
+    .filter{ it[2] == it[3] }
+    .map{ it->[it[0], it[1]] }
+    .set{ ch_cool_zoomify }
+
+  COOLER_ZOOMIFY(
+    ch_cool_zoomify
+  )
+  ch_versions = ch_versions.mix(COOLER_ZOOMIFY.out.versions)
+
+  //*****************************************
+  // DUMP DATA
+  // [meta, cool] / resolution
+
+  COOLER_DUMP(
+    COOLER_BALANCE.out.cool.map{[it[0], it[1], ""]}
+  )
+  ch_versions = ch_versions.mix(COOLER_DUMP.out.versions)
+
+  SPLIT_COOLER_DUMP(
+    COOLER_DUMP.out.bedpe
+  )
+  ch_versions = ch_versions.mix(SPLIT_COOLER_DUMP.out.versions)
+
+  emit:
+  versions = ch_versions
+  cool = COOLER_BALANCE.out.cool
+  mcool = COOLER_ZOOMIFY.out.mcool
+}
\ No newline at end of file
diff --git a/subworkflows/local/hicpro.nf b/subworkflows/local/hicpro.nf
new file mode 100644
index 0000000000000000000000000000000000000000..8b106a0820cf808501add6e5ad886cc726626107
--- /dev/null
+++ b/subworkflows/local/hicpro.nf
@@ -0,0 +1,132 @@
+/*
+ * HICPRO
+ * MAIN WORKFLOW
+ * From the raw sequencing reads to the list of valid interactions
+ */
+  
+include { HICPRO_MAPPING } from './hicpro_mapping'
+include { GET_VALID_INTERACTION } from '../../modules/local/hicpro/get_valid_interaction'
+include { GET_VALID_INTERACTION_DNASE } from '../../modules/local/hicpro/get_valid_interaction_dnase'
+include { MERGE_VALID_INTERACTION } from '../../modules/local/hicpro/merge_valid_interaction'
+include { MERGE_STATS } from '../../modules/local/hicpro/merge_stats'
+include { HICPRO2PAIRS } from '../../modules/local/hicpro/hicpro2pairs'
+include { BUILD_CONTACT_MAPS } from '../../modules/local/hicpro/build_contact_maps'
+include { ICE_NORMALIZATION } from '../../modules/local/hicpro/run_ice'
+
+// Remove the 'chunk' key from meta
+def removeChunks(row){
+  meta = row[0].clone()
+  meta.remove('chunk')
+  return [meta, row[1]]
+}
+
+workflow HICPRO {
+
+  take:
+  reads // [meta, read1, read2]
+  index // path
+  fragments // path
+  chrsize // path
+  ligation_site // value
+  map_res // values
+
+  main:
+  ch_versions = Channel.empty()
+
+  // Fastq to paired-end bam
+  HICPRO_MAPPING(
+    reads,
+    index,
+    ligation_site
+  )
+  ch_versions = ch_versions.mix(HICPRO_MAPPING.out.versions)
+
+  //***************************************
+  // DIGESTION PROTOCOLS
+
+  if (!params.dnase){
+    GET_VALID_INTERACTION (
+      HICPRO_MAPPING.out.bam,
+      fragments.collect()
+    )
+    ch_versions = ch_versions.mix(GET_VALID_INTERACTION.out.versions)
+    ch_valid_pairs = GET_VALID_INTERACTION.out.valid_pairs
+    ch_valid_stats = GET_VALID_INTERACTION.out.stats
+
+  }else{
+
+  //****************************************
+  // DNASE-LIKE PROTOCOLS
+
+    GET_VALID_INTERACTION_DNASE (
+      HICPRO_MAPPING.out.bam
+    )
+    ch_versions = ch_versions.mix(GET_VALID_INTERACTION_DNASE.out.versions)
+    ch_valid_pairs = GET_VALID_INTERACTION_DNASE.out.valid_pairs
+    ch_valid_stats = GET_VALID_INTERACTION_DNASE.out.stats
+  }
+  
+
+  //**************************************
+  // MERGE AND REMOVE DUPLICATES
+  
+  //if (params.split_fastq){
+  ch_valid_pairs = ch_valid_pairs.map{ it -> removeChunks(it)}.groupTuple()
+  ch_hicpro_stats = HICPRO_MAPPING.out.mapstats.map{it->removeChunks(it)}.groupTuple()
+                      .concat(HICPRO_MAPPING.out.pairstats.map{it->removeChunks(it)}.groupTuple(),
+		        ch_valid_stats.map{it->removeChunks(it)}.groupTuple())
+  //}else{
+  //  ch_hicpro_stats = HICPRO_MAPPING.out.mapstats.groupTuple()
+  //                      .concat(HICPRO_MAPPING.out.pairstats.groupTuple(),
+  //                              ch_valid_stats.groupTuple())
+  //}
+
+  MERGE_VALID_INTERACTION (
+    ch_valid_pairs
+  )
+  ch_versions = ch_versions.mix(MERGE_VALID_INTERACTION.out.versions)
+
+  MERGE_STATS(
+    ch_hicpro_stats
+  )
+  ch_versions = ch_versions.mix(MERGE_STATS.out.versions)
+
+  //***************************************
+  // CONVERTS TO PAIRS
+  HICPRO2PAIRS (
+    MERGE_VALID_INTERACTION.out.valid_pairs,
+    chrsize.collect()
+  )
+  ch_versions = ch_versions.mix(HICPRO2PAIRS.out.versions)
+
+  //***************************************
+  // CONTACT MAPS
+  
+  if (params.hicpro_maps){    
+
+    //build_contact_maps
+    BUILD_CONTACT_MAPS(
+      MERGE_VALID_INTERACTION.out.valid_pairs.combine(map_res),
+      chrsize.collect()
+    )
+    ch_hicpro_raw_maps = BUILD_CONTACT_MAPS.out.maps
+ 
+    // run_ice
+    ICE_NORMALIZATION(
+      BUILD_CONTACT_MAPS.out.maps
+    )
+    ch_hicpro_iced_maps = ICE_NORMALIZATION.out.maps
+    ch_versions = ch_versions.mix(ICE_NORMALIZATION.out.versions)
+
+  }else{
+    ch_hicpro_raw_maps = Channel.empty()
+    ch_hicpro_iced_maps = Channel.empty()
+  }
+
+  emit:
+  versions = ch_versions
+  pairs = HICPRO2PAIRS.out.pairs
+  mqc = MERGE_VALID_INTERACTION.out.mqc.concat(MERGE_STATS.out.mqc)
+  raw_maps = ch_hicpro_raw_maps
+  iced_maps = ch_hicpro_iced_maps
+}
diff --git a/subworkflows/local/hicpro_mapping.nf b/subworkflows/local/hicpro_mapping.nf
new file mode 100644
index 0000000000000000000000000000000000000000..0f889e95f425becd78f6ab097ea035b6bd6e7abf
--- /dev/null
+++ b/subworkflows/local/hicpro_mapping.nf
@@ -0,0 +1,115 @@
+/*
+ * HiC-Pro mapping
+ * From the raw sequencing reads to a paired-end bam file
+ */
+
+include { BOWTIE2_ALIGN } from '../../modules/nf-core/bowtie2/align/main'
+include { TRIM_READS } from '../../modules/local/hicpro/trim_reads'
+include { BOWTIE2_ALIGN as BOWTIE2_ALIGN_TRIMMED } from '../../modules/nf-core/bowtie2/align/main'
+include { MERGE_BOWTIE2 } from '../../modules/local/hicpro/bowtie2_merge'
+include { COMBINE_MATES} from '../../modules/local/hicpro/combine_mates'
+include { MAPPING_STATS_DNASE } from '../../modules/local/hicpro/dnase_mapping_stats'
+
+// Paired-end to Single-end 
+def pairToSingle(row, mates) {
+  def meta = row[0].clone()
+  meta.single_end = true
+  meta.mates = mates
+  if (mates == "R1") {
+    return [meta, [ row[1][0]] ]
+  }else if (mates == "R2"){
+    return [meta, [ row[1][1]] ]
+  }
+}
+
+// Single-end to Paired-end
+def singleToPair(row){
+  def meta = row[0].clone()
+  meta.remove('mates')
+  meta.single_end = false
+  return [ meta, row[1] ]
+}
+
+
+workflow HICPRO_MAPPING {
+
+  take:
+  reads // [meta, read1, read2]
+  index // [meta, path]
+  ligation_site // value
+
+  main:
+  ch_versions = Channel.empty()
+ 
+  // Align each mate separately and add mates information in [meta]
+  ch_reads_r1 = reads.map{ it -> pairToSingle(it,"R1") }
+  ch_reads_r2 = reads.map{ it -> pairToSingle(it,"R2") }
+  ch_reads = ch_reads_r1.concat(ch_reads_r2)
+
+  // bowtie2 - save_unaligned=true - sort_bam=false
+  BOWTIE2_ALIGN(
+    ch_reads,
+    index.collect(),
+    true,
+    false
+  )
+  ch_versions = ch_versions.mix(BOWTIE2_ALIGN.out.versions)
+
+  if (!params.dnase){
+    // trim reads
+    TRIM_READS(
+      BOWTIE2_ALIGN.out.fastq,
+      ligation_site.collect()
+    )
+    ch_versions = ch_versions.mix(TRIM_READS.out.versions)
+
+    // bowtie2 on trimmed reads - save_unaligned=false - sort_bam=false
+    BOWTIE2_ALIGN_TRIMMED(
+      TRIM_READS.out.fastq,
+      index.collect(),
+      false,
+      false
+    )
+    ch_versions = ch_versions.mix(BOWTIE2_ALIGN_TRIMMED.out.versions)
+
+    // Merge the two mapping steps
+    BOWTIE2_ALIGN.out.bam
+      .combine(BOWTIE2_ALIGN_TRIMMED.out.bam, by:[0])
+      .set { ch_bowtie2_align}
+
+    MERGE_BOWTIE2(
+      ch_bowtie2_align
+    )
+    ch_versions = ch_versions.mix(MERGE_BOWTIE2.out.versions)
+    ch_mapping_stats = MERGE_BOWTIE2.out.stats
+    
+    // Combine mates
+    MERGE_BOWTIE2.out.bam
+      .map { singleToPair(it) }
+      .groupTuple()
+      .set {ch_bams}
+
+  }else{
+
+    MAPPING_STATS_DNASE(
+      BOWTIE2_ALIGN.out.bam
+    )
+    ch_mapping_stats = MAPPING_STATS_DNASE.out.stats
+
+    BOWTIE2_ALIGN.out.bam
+      .map { singleToPair(it) }
+      .groupTuple()
+      .set {ch_bams}
+  }
+
+  COMBINE_MATES (
+    ch_bams
+  )
+  ch_versions = ch_versions.mix(COMBINE_MATES.out.versions)
+
+  emit:
+  versions = ch_versions
+  bam = COMBINE_MATES.out.bam
+  mapstats = ch_mapping_stats
+  pairstats = COMBINE_MATES.out.stats
+}
diff --git a/subworkflows/local/input_check.nf b/subworkflows/local/input_check.nf
index 0aecf87fb7813dbcfdd8623e20439e7e1e71b252..3f21f1f2c5a6a82fc781647724c6db3b574813e0 100644
--- a/subworkflows/local/input_check.nf
+++ b/subworkflows/local/input_check.nf
@@ -9,36 +9,64 @@ workflow INPUT_CHECK {
     samplesheet // file: /path/to/samplesheet.csv
 
     main:
-    SAMPLESHEET_CHECK ( samplesheet )
+    if (params.split_fastq){
+
+      SAMPLESHEET_CHECK ( samplesheet )
         .csv
         .splitCsv ( header:true, sep:',' )
-        .map { create_fastq_channel(it) }
+	.map { create_fastq_channels(it) }
+	.splitFastq( by: params.fastq_chunks_size, pe:true, file: true, compress:true)
+	.map { it -> [it[0], [it[1], it[2]]]}
+	.groupTuple(by: [0])
+        .flatMap { it -> setMetaChunk(it) }
+        .collate(2)
+	//.map { it ->
+	//  def meta = it[0].clone()
+	//  meta.chunk = it[1].baseName - ~/.fastq(.gz)?/
+	//  return [meta, [it[1], it[2]]]
+	//}
+        .set { reads }
+
+    }else{
+      SAMPLESHEET_CHECK ( samplesheet )
+      	.csv
+        .splitCsv ( header:true, sep:',' )
+        .map { create_fastq_channels(it) }
+	.map { it -> [it[0], [it[1], it[2]]]}
+	.groupTuple(by: [0])
+        .flatMap { it -> setMetaChunk(it) }
+        .collate(2)
         .set { reads }
+   }
 
     emit:
-    reads                                     // channel: [ val(meta), [ reads ] ]
-    versions = SAMPLESHEET_CHECK.out.versions // channel: [ versions.yml ]
+    reads // channel: [ val(meta), [ reads ] ]
 }
 
 // Function to get list of [ meta, [ fastq_1, fastq_2 ] ]
-def create_fastq_channel(LinkedHashMap row) {
-    // create meta map
-    def meta = [:]
-    meta.id         = row.sample
-    meta.single_end = row.single_end.toBoolean()
-
-    // add path(s) of the fastq file(s) to the meta map
-    def fastq_meta = []
-    if (!file(row.fastq_1).exists()) {
-        exit 1, "ERROR: Please check input samplesheet -> Read 1 FastQ file does not exist!\n${row.fastq_1}"
-    }
-    if (meta.single_end) {
-        fastq_meta = [ meta, [ file(row.fastq_1) ] ]
-    } else {
-        if (!file(row.fastq_2).exists()) {
-            exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.fastq_2}"
-        }
-        fastq_meta = [ meta, [ file(row.fastq_1), file(row.fastq_2) ] ]
-    }
-    return fastq_meta
+def create_fastq_channels(LinkedHashMap row) {
+  def meta = [:]
+  meta.id = row.sample
+  meta.single_end = false
+
+  def array = []
+  if (!file(row.fastq_1).exists()) {
+    exit 1, "ERROR: Please check input samplesheet -> Read 1 FastQ file does not exist!\n${row.fastq_1}"
+  }
+  if (!file(row.fastq_2).exists()) {
+    exit 1, "ERROR: Please check input samplesheet -> Read 2 FastQ file does not exist!\n${row.fastq_2}"
+  }
+  array = [ meta, file(row.fastq_1), file(row.fastq_2) ]
+  return array
 }
+
+// Set the meta.chunk value in case of technical replicates
+def setMetaChunk(row){
+  def map = []
+  row[1].eachWithIndex() { file,i ->
+    meta = row[0].clone()
+    meta.chunk = i
+    map += [meta, file]
+  }
+  return map
+}
\ No newline at end of file
diff --git a/subworkflows/local/prepare_genome.nf b/subworkflows/local/prepare_genome.nf
new file mode 100644
index 0000000000000000000000000000000000000000..a4a2399303570928d763612bd03d88bd4b485f3b
--- /dev/null
+++ b/subworkflows/local/prepare_genome.nf
@@ -0,0 +1,69 @@
+/*
+ * Prepare Annotation Genome for Hi-C data analysis
+ */
+
+include { BOWTIE2_BUILD } from '../../modules/nf-core/bowtie2/build/main'
+include { CUSTOM_GETCHROMSIZES } from '../../modules/nf-core/custom/getchromsizes/main'
+include { GET_RESTRICTION_FRAGMENTS } from '../../modules/local/hicpro/get_restriction_fragments'
+
+workflow PREPARE_GENOME {
+
+  take:
+  fasta
+  restriction_site
+
+  main:
+  ch_versions = Channel.empty()
+
+  //***************************************
+  // Bowtie Index
+  if(!params.bwt2_index){
+    BOWTIE2_BUILD (
+      fasta
+    )
+    ch_index = BOWTIE2_BUILD.out.index
+    ch_versions = ch_versions.mix(BOWTIE2_BUILD.out.versions)
+  }else{
+    Channel.fromPath( params.bwt2_index , checkIfExists: true)
+           .map { it -> [[:], it]}
+           .ifEmpty { exit 1, "Genome index: Provided index not found: ${params.bwt2_index}" }
+           .set { ch_index }
+  }
+
+  //***************************************
+  // Chromosome size
+  if(!params.chromosome_size){
+    CUSTOM_GETCHROMSIZES(
+      fasta
+    )
+    ch_chromsize = CUSTOM_GETCHROMSIZES.out.sizes
+    ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions)
+  }else{
+    Channel.fromPath( params.chromosome_size , checkIfExists: true)
+           .map { it -> [[:], it]}
+           .set {ch_chromsize} 
+  }
+
+  //***************************************
+  // Restriction fragments
+  if(!params.restriction_fragments && !params.dnase){
+    GET_RESTRICTION_FRAGMENTS(
+      fasta,
+      restriction_site
+    )
+    ch_resfrag = GET_RESTRICTION_FRAGMENTS.out.results
+    ch_versions = ch_versions.mix(GET_RESTRICTION_FRAGMENTS.out.versions)
+  }else if (!params.dnase){
+     Channel.fromPath( params.restriction_fragments, checkIfExists: true )
+            .map{ it -> [[:], it] }
+            .set {ch_resfrag}
+  }else{
+    ch_resfrag = Channel.empty()
+  }
+
+  emit:
+  index = ch_index
+  chromosome_size = ch_chromsize
+  res_frag = ch_resfrag
+  versions = ch_versions
+}
diff --git a/subworkflows/local/tads.nf b/subworkflows/local/tads.nf
new file mode 100644
index 0000000000000000000000000000000000000000..31c1e38b03b8f360c25014e92767f3d9705c434f
--- /dev/null
+++ b/subworkflows/local/tads.nf
@@ -0,0 +1,28 @@
+include { COOLTOOLS_INSULATION } from '../../modules/local/cooltools/insulation'
+include { HIC_FIND_TADS } from '../../modules/local/hicexplorer/hicFindTADs'
+
+workflow TADS {
+
+  take:
+  cool
+
+  main:
+  ch_versions = Channel.empty()
+  ch_tads = Channel.empty()
+
+  if (params.tads_caller =~ 'insulation'){
+    COOLTOOLS_INSULATION(cool)
+    ch_versions = ch_versions.mix(COOLTOOLS_INSULATION.out.versions)
+    ch_tads = ch_tads.mix(COOLTOOLS_INSULATION.out.tsv)
+  }
+  
+  if (params.tads_caller =~ 'hicexplorer'){
+    HIC_FIND_TADS(cool)
+    ch_versions = ch_versions.mix(HIC_FIND_TADS.out.versions)
+    ch_tads = ch_tads.mix(HIC_FIND_TADS.out.results)
+  }
+
+  emit:
+  tads = ch_tads
+  versions = ch_versions
+}
\ No newline at end of file
diff --git a/workflows/hic.nf b/workflows/hic.nf
index ebc77555aaefdb80b40a3f271e2c2dfceb0b2105..2ffa5b4dc8bee033473cab5b755befe9460ca0a9 100644
--- a/workflows/hic.nf
+++ b/workflows/hic.nf
@@ -9,14 +9,76 @@ def summary_params = NfcoreSchema.paramsSummaryMap(workflow, params)
 // Validate input parameters
 WorkflowHic.initialise(params, log)
 
-// TODO nf-core: Add all file path parameters for the pipeline to the list below
 // Check input path parameters to see if they exist
-def checkPathParamList = [ params.input, params.multiqc_config, params.fasta ]
+def checkPathParamList = [ params.input ]
+checkPathParamList = [
+    params.input, params.multiqc_config,
+    params.fasta, params.bwt2_index
+]
+
 for (param in checkPathParamList) { if (param) { file(param, checkIfExists: true) } }
 
 // Check mandatory parameters
 if (params.input) { ch_input = file(params.input) } else { exit 1, 'Input samplesheet not specified!' }
 
+//*****************************************
+// Digestion parameters
+if (params.digestion){
+  restriction_site = params.digestion ? params.digest[ params.digestion ].restriction_site ?: false : false
+  ch_restriction_site = Channel.value(restriction_site)
+  ligation_site = params.digestion ? params.digest[ params.digestion ].ligation_site ?: false : false
+  ch_ligation_site = Channel.value(ligation_site)
+}else if (params.restriction_site && params.ligation_site){
+  ch_restriction_site = Channel.value(params.restriction_site)
+  ch_ligation_site = Channel.value(params.ligation_site)
+}else if (params.dnase){
+  ch_restriction_site = Channel.empty()
+  ch_ligation_site = Channel.empty()
+}else{
+   exit 1, "Ligation motif not found. Please either use the `--digestion` parameters or specify the `--restriction_site` and `--ligation_site`. For DNase Hi-C, please use '--dnase' option"
+}
+
+//****************************************
+// Combine all maps resolution for downstream analysis
+
+ch_map_res = Channel.from( params.bin_size ).splitCsv().flatten().toInteger()
+
+if (params.res_zoomify){
+  ch_zoom_res = Channel.from( params.res_zoomify ).splitCsv().flatten().toInteger()
+  ch_map_res = ch_map_res.concat(ch_zoom_res)
+}
+
+if (params.res_tads && !params.skip_tads){
+  ch_tads_res = Channel.from( "${params.res_tads}" ).splitCsv().flatten().toInteger()
+  ch_map_res = ch_map_res.concat(ch_tads_res)
+}else{
+  ch_tads_res=Channel.empty()
+  if (!params.skip_tads){
+    log.warn "[nf-core/hic] Hi-C resolution for TADs calling not specified. See --res_tads" 
+  }
+}
+
+if (params.res_dist_decay && !params.skip_dist_decay){
+  ch_ddecay_res = Channel.from( "${params.res_dist_decay}" ).splitCsv().flatten().toInteger()
+  ch_map_res = ch_map_res.concat(ch_ddecay_res)
+}else{
+  ch_ddecay_res = Channel.empty()
+  if (!params.skip_dist_decay){
+    log.warn "[nf-core/hic] Hi-C resolution for distance decay not specified. See --res_dist_decay" 
+  }
+}
+
+if (params.res_compartments && !params.skip_compartments){
+  ch_comp_res = Channel.from( "${params.res_compartments}" ).splitCsv().flatten().toInteger()
+  ch_map_res = ch_map_res.concat(ch_comp_res)
+}else{
+  ch_comp_res = Channel.empty()
+  if (!params.skip_compartments){
+    log.warn "[nf-core/hic] Hi-C resolution for compartment calling not specified. See --res_compartments" 
+  }
+}
+
+ch_map_res = ch_map_res.unique()
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     CONFIG FILES
@@ -34,10 +96,21 @@ ch_multiqc_custom_methods_description = params.multiqc_methods_description ? fil
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
 
+//
+// MODULE: Local to the pipeline
+//
+include { HIC_PLOT_DIST_VS_COUNTS } from '../modules/local/hicexplorer/hicPlotDistVsCounts' 
+include { MULTIQC } from '../modules/local/multiqc'
+
 //
 // SUBWORKFLOW: Consisting of a mix of local and nf-core/modules
 //
 include { INPUT_CHECK } from '../subworkflows/local/input_check'
+include { PREPARE_GENOME } from '../subworkflows/local/prepare_genome'
+include { HICPRO } from '../subworkflows/local/hicpro'
+include { COOLER } from '../subworkflows/local/cooler'
+include { COMPARTMENTS } from '../subworkflows/local/compartments'
+include { TADS } from '../subworkflows/local/tads'
 
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -49,9 +122,19 @@ include { INPUT_CHECK } from '../subworkflows/local/input_check'
 // MODULE: Installed directly from nf-core/modules
 //
 include { FASTQC                      } from '../modules/nf-core/fastqc/main'
-include { MULTIQC                     } from '../modules/nf-core/multiqc/main'
 include { CUSTOM_DUMPSOFTWAREVERSIONS } from '../modules/nf-core/custom/dumpsoftwareversions/main'
 
+/*
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  CHANNELS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*/
+
+Channel.fromPath( params.fasta )
+       .ifEmpty { exit 1, "Genome index: Fasta file not found: ${params.fasta}" }
+       .map{it->[[:],it]}
+       .set { ch_fasta }
+
 /*
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     RUN MAIN WORKFLOW
@@ -63,50 +146,134 @@ def multiqc_report = []
 
 workflow HIC {
 
-    ch_versions = Channel.empty()
+  ch_versions = Channel.empty()
 
-    //
-    // SUBWORKFLOW: Read in samplesheet, validate and stage input files
-    //
-    INPUT_CHECK (
-        ch_input
-    )
-    ch_versions = ch_versions.mix(INPUT_CHECK.out.versions)
+  //
+  // SUBWORKFLOW: Read in samplesheet, validate and stage input files
+  //
+  INPUT_CHECK (
+    ch_input
+  )
+
+  //
+  // SUBWORKFLOW: Prepare genome annotation
+  //
+  PREPARE_GENOME(
+    ch_fasta,
+    ch_restriction_site
+  )
+  ch_versions = ch_versions.mix(PREPARE_GENOME.out.versions)
+
+  //
+  // MODULE: Run FastQC
+  //
+  FASTQC (
+    INPUT_CHECK.out.reads
+  )
+  ch_versions = ch_versions.mix(FASTQC.out.versions)
 
-    //
-    // MODULE: Run FastQC
-    //
-    FASTQC (
-        INPUT_CHECK.out.reads
+  //
+  // SUB-WORKFLOW: HiC-Pro
+  //
+  INPUT_CHECK.out.reads.view()
+  HICPRO (
+    INPUT_CHECK.out.reads,
+    PREPARE_GENOME.out.index,
+    PREPARE_GENOME.out.res_frag,
+    PREPARE_GENOME.out.chromosome_size,
+    ch_ligation_site,
+    ch_map_res
+  )
+  ch_versions = ch_versions.mix(HICPRO.out.versions)
+
+  //
+  // SUB-WORKFLOW: COOLER
+  //
+  COOLER (
+    HICPRO.out.pairs,
+    PREPARE_GENOME.out.chromosome_size,
+    ch_map_res
+  )
+  ch_versions = ch_versions.mix(COOLER.out.versions)
+
+  //
+  // MODULE: HICEXPLORER/HIC_PLOT_DIST_VS_COUNTS
+  //
+  if (!params.skip_dist_decay){
+    COOLER.out.cool
+      .combine(ch_ddecay_res)
+      .filter{ it[0].resolution == it[2] }
+      .map { it -> [it[0], it[1]]}
+      .set{ ch_distdecay }
+
+    HIC_PLOT_DIST_VS_COUNTS(
+      ch_distdecay
     )
-    ch_versions = ch_versions.mix(FASTQC.out.versions.first())
+    ch_versions = ch_versions.mix(HIC_PLOT_DIST_VS_COUNTS.out.versions)
+  }
+
+  //
+  // SUB-WORKFLOW: COMPARTMENT CALLING
+  //
+  if (!params.skip_compartments){
+    COOLER.out.cool
+      .combine(ch_comp_res)
+      .filter{ it[0].resolution == it[2] }
+      .map { it -> [it[0], it[1], it[2]]}
+      .set{ ch_cool_compartments }
 
-    CUSTOM_DUMPSOFTWAREVERSIONS (
-        ch_versions.unique().collectFile(name: 'collated_versions.yml')
+    COMPARTMENTS (
+      ch_cool_compartments,
+      ch_fasta,
+      PREPARE_GENOME.out.chromosome_size
     )
+    ch_versions = ch_versions.mix(COMPARTMENTS.out.versions)
+  }
 
-    //
-    // MODULE: MultiQC
-    //
-    workflow_summary    = WorkflowHic.paramsSummaryMultiqc(workflow, summary_params)
-    ch_workflow_summary = Channel.value(workflow_summary)
-
-    methods_description    = WorkflowHic.methodsDescriptionText(workflow, ch_multiqc_custom_methods_description)
-    ch_methods_description = Channel.value(methods_description)
-
-    ch_multiqc_files = Channel.empty()
-    ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml'))
-    ch_multiqc_files = ch_multiqc_files.mix(ch_methods_description.collectFile(name: 'methods_description_mqc.yaml'))
-    ch_multiqc_files = ch_multiqc_files.mix(CUSTOM_DUMPSOFTWAREVERSIONS.out.mqc_yml.collect())
-    ch_multiqc_files = ch_multiqc_files.mix(FASTQC.out.zip.collect{it[1]}.ifEmpty([]))
-
-    MULTIQC (
-        ch_multiqc_files.collect(),
-        ch_multiqc_config.toList(),
-        ch_multiqc_custom_config.toList(),
-        ch_multiqc_logo.toList()
+  //
+  // SUB-WORKFLOW : TADS CALLING
+  //
+  if (!params.skip_tads){
+    COOLER.out.cool
+      .combine(ch_tads_res)
+      .filter{ it[0].resolution == it[2] }
+      .map { it -> [it[0], it[1]]}
+      .set{ ch_cool_tads }
+                                                                                                                                                                                                            
+    TADS(
+      ch_cool_tads
     )
-    multiqc_report = MULTIQC.out.report.toList()
+    ch_versions = ch_versions.mix(TADS.out.versions)
+  }
+
+  //
+  // SOFTWARE VERSION
+  //
+  CUSTOM_DUMPSOFTWAREVERSIONS(
+    ch_versions.unique().collectFile(name: 'collated_versions.yml')
+  )
+
+  //
+  // MODULE: MultiQC
+  //
+  workflow_summary    = WorkflowHic.paramsSummaryMultiqc(workflow, summary_params)
+  ch_workflow_summary = Channel.value(workflow_summary)
+
+  ch_multiqc_files = Channel.empty()
+  ch_multiqc_files = ch_multiqc_files.mix(ch_multiqc_config)
+  ch_multiqc_files = ch_multiqc_files.mix(ch_multiqc_custom_config.collect().ifEmpty([]))
+  ch_multiqc_files = ch_multiqc_files.mix(ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml'))
+  ch_multiqc_files = ch_multiqc_files.mix(FASTQC.out.zip.map{it->it[1]})
+  ch_multiqc_files = ch_multiqc_files.mix(HICPRO.out.mqc)
+
+  MULTIQC (
+    ch_multiqc_config,
+    ch_multiqc_custom_config.collect().ifEmpty([]),
+    ch_workflow_summary.collectFile(name: 'workflow_summary_mqc.yaml'),
+    FASTQC.out.zip.map{it->it[1]},
+    HICPRO.out.mqc.collect()
+  )
+  multiqc_report = MULTIQC.out.report.toList()
 }
 
 /*