diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 53f847861047be018e469f25039e10383cce8071..62ad6c259efad78784730845c6e83ac9e7b158ab 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -9,7 +9,9 @@ Please use the pre-filled template to save time.
 However, don't be put off by this template - other more general issues and suggestions are welcome!
 Contributions to the code are even more welcome ;)
 
-> If you need help using or modifying nf-core/hic then the best place to ask is on the nf-core Slack [#hic](https://nfcore.slack.com/channels/hic) channel ([join our Slack here](https://nf-co.re/join/slack)).
+> If you need help using or modifying nf-core/hic then the best place to ask is on the nf-core
+Slack [#hic](https://nfcore.slack.com/channels/hic) channel ([join our Slack here](https://nf-co.re/join/slack)).
+
 
 ## Contribution workflow
 
@@ -53,5 +55,8 @@ These tests are run both with the latest available version of `Nextflow` and als
 * A PR should be made on `master` from patch to directly this particular bug.
 
 ## Getting help
+For further information/help, please consult the [nf-core/hic documentation](https://nf-co.re/hic/docs) and
+don't hesitate to get in touch on the nf-core Slack [#hic](https://nfcore.slack.com/channels/hic) channel
+([join our Slack here](https://nf-co.re/join/slack)).
 
 For further information/help, please consult the [nf-core/hic documentation](https://nf-co.re/hic/usage) and don't hesitate to get in touch on the nf-core Slack [#hic](https://nfcore.slack.com/channels/hic) channel ([join our Slack here](https://nf-co.re/join/slack)).
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 2e01a5fe11f6ed4f3e5bfb4bcaff8c8b7bdc56d5..1a92849358ce849acd440d8a6806aeeb8a8e92fc 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -11,6 +11,7 @@ Hi there!
 
 Thanks for suggesting a new feature for the pipeline!
 Please delete this text and anything that's not relevant from the template below:
+
 -->
 
 ## Is your feature request related to a problem? Please describe
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 42a3a71112e2f53a9773799ae155809a3e3a3e46..0bdd57579b660e0b42eaa248a2b5e1a163ed95e3 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -18,3 +18,4 @@ Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/hic/
 - [ ] If you've fixed a bug or added code that should be tested, add tests!
 - [ ] Documentation in `docs` is updated
 - [ ] If necessary, also make a PR on the [nf-core/hic branch on the nf-core/test-datasets repo](https://github.com/nf-core/test-datasets/pull/new/nf-core/hic)
+
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml
index e63ea697aa1c0b27438360e3186e6ddaf2b4afee..879955ba790402ab7ada28cc54320644e27ec0cb 100644
--- a/.github/workflows/awsfulltest.yml
+++ b/.github/workflows/awsfulltest.yml
@@ -23,7 +23,6 @@ jobs:
       - name: Install awscli
         run: conda install -c conda-forge awscli
       - name: Start AWS batch job
-        # TODO nf-core: You can customise AWS full pipeline tests as required
         # Add full size test data (but still relatively small datasets for few samples)
         # on the `test_full.config` test runs with only one set of parameters
         # Then specify `-profile test_full` instead of `-profile test` on the AWS batch command
@@ -40,4 +39,4 @@ jobs:
             --job-name nf-core-hic \
             --job-queue $AWS_JOB_QUEUE \
             --job-definition $AWS_JOB_DEFINITION \
-            --container-overrides '{"command": ["nf-core/hic", "-r '"${GITHUB_SHA}"' -profile test --outdir s3://'"${AWS_S3_BUCKET}"'/hic/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/hic/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}'
+            --container-overrides '{"command": ["nf-core/hic", "-r '"${GITHUB_SHA}"' -profile test_full --outdir s3://'"${AWS_S3_BUCKET}"'/hic/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/hic/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}]}'
diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml
index 92dfa0ddccbebce1ffae4aca4d631f295cb0e601..ee179a28749abcadb808634e8f682575513f8a99 100644
--- a/.github/workflows/awstest.yml
+++ b/.github/workflows/awstest.yml
@@ -20,7 +20,6 @@ jobs:
       - name: Install awscli
         run: conda install -c conda-forge awscli
       - name: Start AWS batch job
-        # TODO nf-core: You can customise CI pipeline run tests as required
         # For example: adding multiple test runs with different parameters
         # Remember that you can parallelise this by using strategy.matrix
         env:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5553430d3c1f667b2dc0462cf709859df1b1d903..f7e6e00ae0bbef6fc886b20d0e30c766931cfe88 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -50,8 +50,5 @@ jobs:
           sudo mv nextflow /usr/local/bin/
 
       - name: Run pipeline with test data
-        # TODO nf-core: You can customise CI pipeline run tests as required
-        # For example: adding multiple test runs with different parameters
-        # Remember that you can parallelise this by using strategy.matrix
         run: |
           nextflow run ${GITHUB_WORKSPACE} -profile test,docker
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index bef81e61905d8f265c06b7001c3c17d56d03ba4a..6367daba8be05f3cb37649750dad86f8dc88a3c6 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -33,10 +33,8 @@ jobs:
   nf-core:
     runs-on: ubuntu-latest
     steps:
-
       - name: Check out pipeline code
         uses: actions/checkout@v2
-
       - name: Install Nextflow
         env:
           CAPSULE_LOG: none
@@ -75,3 +73,4 @@ jobs:
             lint_results.md
             PR_number.txt
 
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d11259a2ad55442ea006ced96d7821314349d9e8..6be2eab8616b01e97266a50938accaf430f00609 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,14 +3,113 @@
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## v1.3.0dev - [date]
+## v1.3.0dev - 2020-11-01
 
-Initial release of nf-core/hic, created with the [nf-core](https://nf-co.re/) template.
+* Template update for nf-core/tools v1.11
+* Minor fix to summary log messages in pipeline header
+
+## v1.2.2 - 2020-09-02
+
+### `Added`
+
+* Template update for nf-core/tools v1.10.2
+* Add the `--fastq_chunks_size` to specify the number of reads per chunk if `--split_fastq` is true
+
+### `Fixed`
+
+* Bug in `--split_fastq` option not recognized
+
+## v1.2.1 - 2020-07-06
+
+### `Fixed`
+
+* Fix issue with `--fasta` option and `.fa` extension (#66)
+
+## v1.2.0 - 2020-06-18
 
 ### `Added`
 
+* Bump v1.2.0
+* Merge template nf-core 1.9
+* Move some options to camel_case
+* Update python scripts for python3
+* Update conda environment file
+  * python base `2.7.15` > `3.7.6`
+  * pip `19.1` > `20.0.1`
+  * scipy `1.2.1` > `1.4.1`
+  * numpy `1.16.3` > `1.18.1`
+  * bx-python `0.8.2` > `0.8.8`
+  * pysam `0.15.2` > `0.15.4`
+  * cooler `0.8.5` > `0.8.6`
+  * multiqc `1.7` > `1.8`
+  * iced `0.5.1` > `0.5.6`
+  * *_New_* pymdown-extensions `7.1`
+  * *_New_* hicexplorer `3.4.3`
+  * *_New_* bioconductor-hitc `1.32.0`
+  * *_New_* r-optparse `1.6.6`
+  * *_New_* ucsc-bedgraphtobigwig `377`
+  * *_New_* cython `0.29.19`
+  * *_New_* cooltools `0.3.2`
+  * *_New_* fanc `0.8.30`
+  * *_Removed_* r-markdown
+
 ### `Fixed`
 
-### `Dependencies`
+* Fix error in doc for Arima kit usage
+* Sort output of `get_valid_interaction` process as the input files of `remove_duplicates`
+are expected to be sorted (sort -m)
 
 ### `Deprecated`
+
+* Command line options converted to `camel_case`:
+  * `--skipMaps` > `--skip_maps`
+  * `--skipIce` > `--skip_ice`
+  * `--skipCool` > `--skip_cool`
+  * `--skipMultiQC` > `--skip_multiqc`
+  * `--saveReference` > `--save_reference`
+  * `--saveAlignedIntermediates` > `--save_aligned_intermediates`
+  * `--saveInteractionBAM` > `--save_interaction_bam`
+
+## v1.1.1 - 2020-04-02
+
+### `Fixed`
+
+* Fix bug in tag. Remove '['
+
+## v1.1.0 - 2019-10-15
+
+### `Added`
+
+* Update hicpro2higlass with `-p` parameter
+* Support 'N' base motif in restriction/ligation sites
+* Support multiple restriction enzymes/ligation sites (comma separated) ([#31](https://github.com/nf-core/hic/issues/31))
+* Add --saveInteractionBAM option
+* Add DOI ([#29](https://github.com/nf-core/hic/issues/29))
+* Update manual ([#28](https://github.com/nf-core/hic/issues/28))
+
+### `Fixed`
+
+* Fix bug for reads extension `_1`/`_2` ([#30](https://github.com/nf-core/hic/issues/30))
+
+## v1.0 - [2019-05-06]
+
+Initial release of nf-core/hic, created with the [nf-core](http://nf-co.re/) template.
+
+### `Added`
+
+First version of nf-core Hi-C pipeline which is a Nextflow implementation of
+the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/).
+Note that not all HiC-Pro functionalities are implemented yet.
+The current version supports most protocols including Hi-C, in situ Hi-C,
+DNase Hi-C, Micro-C, capture-C or HiChip data.
+
+In summary, this version allows:
+
+* Automatic detection and generation of annotation files based on igenomes
+if not provided.
+* Two-steps alignment of raw sequencing reads
+* Reads filtering and detection of valid interaction products
+* Generation of raw contact matrices for a set of resolutions
+* Normalization of the contact maps using the ICE algorithm
+* Generation of cooler file for visualization on [higlass](https://higlass.io/)
+* Quality report based on HiC-Pro MultiQC module
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 405fb1bfd7f0d022a8cee036dab43e72b8af871d..9d68eed2ae8c493a162c2294cdb7e5f229df6283 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -2,11 +2,17 @@
 
 ## Our Pledge
 
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project
+and our community a harassment-free experience for everyone, regardless of
+age, body size, disability, ethnicity, gender identity and expression, level
+of experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
 
 ## Our Standards
 
-Examples of behavior that contributes to creating a positive environment include:
+Examples of behavior that contributes to creating a positive environment
+include:
 
 * Using welcoming and inclusive language
 * Being respectful of differing viewpoints and experiences
@@ -16,31 +22,55 @@ Examples of behavior that contributes to creating a positive environment include
 
 Examples of unacceptable behavior by participants include:
 
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* The use of sexualized language or imagery and unwelcome sexual attention
+or advances
 * Trolling, insulting/derogatory comments, and personal or political attacks
 * Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
+* Publishing others' private information, such as a physical or electronic
+address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+professional setting
 
 ## Our Responsibilities
 
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
 
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
 
 ## Scope
 
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
 
 ## Enforcement
 
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-co.re/join/slack). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team on
+[Slack](https://nf-co.re/join/slack). The project team will review
+and investigate all complaints, and will respond in a way that it deems
+appropriate to the circumstances. The project team is obligated to maintain
+confidentiality with regard to the reporter of an incident. Further details
+of specific enforcement policies may be posted separately.
 
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
 
 ## Attribution
 
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [https://www.contributor-covenant.org/version/1/4/code-of-conduct/][version]
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[https://www.contributor-covenant.org/version/1/4/code-of-conduct/][version]
 
 [homepage]: https://contributor-covenant.org
 [version]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/
diff --git a/Dockerfile b/Dockerfile
index 11a4ec81d7a3616c87e27fa3b4b2642125c984a8..422e2e16938a79677dc89d9e0dce16d24d1e9a4c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,9 @@ FROM nfcore/base:1.12
 LABEL authors="Nicolas Servant" \
       description="Docker image containing all software requirements for the nf-core/hic pipeline"
 
-# Install the conda environment
+## Install gcc for pip iced install
+RUN apt-get update && apt-get install -y gcc g++ && apt-get clean -y
+
 COPY environment.yml /
 RUN conda env create --quiet -f /environment.yml && conda clean -a
 
diff --git a/README.md b/README.md
index 6fba85333790611900fb643833922ec8f6dd2ad9..589bcb3d57346d43649ca7881b90bf30caa1137f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# ![nf-core/hic](docs/images/nf-core-hic_logo.png)
+# ![nf-core/hic](docs/images/nf-core-hic_logo.png)
 
 **Analysis of Chromosome Conformation Capture data (Hi-C)**.
 
@@ -8,11 +8,37 @@
 
 [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)
 [![Docker](https://img.shields.io/docker/automated/nfcore/hic.svg)](https://hub.docker.com/r/nfcore/hic)
+
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2669513.svg)](https://doi.org/10.5281/zenodo.2669513)
 [![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23hic-4A154B?logo=slack)](https://nfcore.slack.com/channels/hic)
 
 ## Introduction
 
-The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible.
+This pipeline is based on the
+[HiC-Pro workflow](https://github.com/nservant/HiC-Pro).
+It was designed to process Hi-C data from raw FastQ files (paired-end Illumina
+data) to normalized contact maps.
+The current version supports most protocols, including digestion protocols as
+well as protocols that do not require restriction enzymes such as DNase Hi-C.
+In practice, this workflow was successfully applied to many data-sets including
+dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or
+HiChip data.
+
+The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
+to run tasks across multiple compute infrastructures in a very portable manner.
+It comes with docker / singularity containers making installation trivial and
+results highly reproducible.
+
+## Pipeline summary
+
+1. Mapping using a two-step strategy to rescue reads spanning the ligation
+sites (bowtie2)
+2. Detection of valid interaction products
+3. Duplicates removal
+4. Create genome-wide contact maps at various resolutions
+5. Contact maps normalization using the ICE algorithm (iced)
+6. Quality controls and report (MultiQC)
+7. Additional export for visualisation and downstream analysis (cooler)
 
 ## Quick Start
 
@@ -20,18 +46,20 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
 
 2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_
 
-3. Download the pipeline and test it on a minimal dataset with a single command:
+3. Download the pipeline and test it on a minimal dataset with a single command
 
     ```bash
     nextflow run nf-core/hic -profile test,<docker/singularity/podman/conda/institute>
     ```
 
-    > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
+    > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation)
+    to see if a custom config file to run nf-core pipelines already exists for your Institute.
+    If so, you can simply use `-profile <institute>` in your command.
+    This will enable either `docker` or `singularity` and set the appropriate execution
+    settings for your local compute environment.
 
 4. Start running your own analysis!
 
-    <!-- TODO nf-core: Update the example "typical command" below used to run the pipeline -->
-
     ```bash
     nextflow run nf-core/hic -profile <docker/singularity/podman/conda/institute> --input '*_R{1,2}.fastq.gz' --genome GRCh37
     ```
@@ -42,7 +70,8 @@ See [usage docs](https://nf-co.re/hic/usage) for all of the available options wh
 
 The nf-core/hic pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/hic/usage) and [output](https://nf-co.re/hic/output).
 
-<!-- TODO nf-core: Add a brief overview of what the pipeline does and how it works -->
+For further information or help, don't hesitate to get in touch on [Slack](https://nfcore.slack.com/channels/hic).
+You can join with [this invite](https://nf-co.re/join/slack).
 
 ## Credits
 
@@ -52,18 +81,21 @@ nf-core/hic was originally written by Nicolas Servant.
 
 If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).
 
-For further information or help, don't hesitate to get in touch on the [Slack `#hic` channel](https://nfcore.slack.com/channels/hic) (you can join with [this invite](https://nf-co.re/join/slack)).
+For further information or help, don't hesitate to get in touch on the
+[Slack `#hic` channel](https://nfcore.slack.com/channels/hic)
+(you can join with [this invite](https://nf-co.re/join/slack)).
 
 ## Citation
 
-<!-- TODO nf-core: Add citation for pipeline after first release. Uncomment lines below and update Zenodo doi. -->
-<!-- If you use  nf-core/hic for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX) -->
+If you use nf-core/hic for your analysis, please cite it using the following
+doi: [10.5281/zenodo.2669513](https://doi.org/10.5281/zenodo.2669513)
 
 You can cite the `nf-core` publication as follows:
 
 > **The nf-core framework for community-curated bioinformatics pipelines.**
 >
-> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.
+> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg,
+Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.
 >
 > _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).
 > ReadCube: [Full Access Link](https://rdcu.be/b1GjZ)
diff --git a/assets/nf-core-hic_social_preview.png b/assets/nf-core-hic_social_preview.png
new file mode 100644
index 0000000000000000000000000000000000000000..54784f0201bec3769e57e00a9a4f3c69c64dc055
Binary files /dev/null and b/assets/nf-core-hic_social_preview.png differ
diff --git a/assets/nf-core-hic_social_preview.svg b/assets/nf-core-hic_social_preview.svg
new file mode 100644
index 0000000000000000000000000000000000000000..bc2e2a33b8b1a8342e4a866e940ca35062e6a3b7
--- /dev/null
+++ b/assets/nf-core-hic_social_preview.svg
@@ -0,0 +1,448 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="1280"
+   height="640"
+   viewBox="0 0 338.66666 169.33333"
+   version="1.1"
+   id="svg8"
+   inkscape:version="0.92.3 (2405546, 2018-03-11)"
+   sodipodi:docname="social_preview_image_hic.svg"
+   inkscape:export-filename="social_preview_image.png"
+   inkscape:export-xdpi="96"
+   inkscape:export-ydpi="96">
+  <defs
+     id="defs2">
+    <clipPath
+       id="d">
+      <path
+         inkscape:connector-curvature="0"
+         id="path9"
+         d="M 0,266 H 1022 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="c">
+      <path
+         inkscape:connector-curvature="0"
+         id="path12"
+         d="m 280.17,136.33 -21.5,-21.584 h 61 v 21.584 z" />
+    </clipPath>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(47.34875,36.9925,-36.9925,47.34875,344.325,162.1875)"
+       x2="1"
+       id="a">
+      <stop
+         id="stop15"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop17"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop19"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop21"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop23"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+    <clipPath
+       id="b">
+      <path
+         inkscape:connector-curvature="0"
+         id="path26"
+         d="M 0,266 H 1022 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath202"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path204"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath158"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path160"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath86"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path88"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath94"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path96"
+         d="M 804.509,211 H 968.795 V 114.019 H 804.509 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath110"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path112"
+         d="M 804.597,506 H 968.883 V 409.019 H 804.597 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath126"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path128"
+         d="M 133.598,209 H 297.883 V 112.019 H 133.598 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath142"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path144"
+         d="M 133.686,504 H 297.972 V 407.019 H 133.686 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath54"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path56-6"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath30"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path32"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath202-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path204-6"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath158-7"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path160-5"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath86-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path88-5"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath94-6"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path96-2"
+         d="M 804.509,211 H 968.795 V 114.019 H 804.509 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath110-9"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path112-1"
+         d="M 804.597,506 H 968.883 V 409.019 H 804.597 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath126-2"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path128-7"
+         d="M 133.598,209 H 297.883 V 112.019 H 133.598 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath142-0"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path144-9"
+         d="M 133.686,504 H 297.972 V 407.019 H 133.686 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath54-3"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path56-60"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <clipPath
+       id="clipPath30-6"
+       clipPathUnits="userSpaceOnUse">
+      <path
+         inkscape:connector-curvature="0"
+         id="path32-2"
+         d="M 0,600 H 1500 V 0 H 0 Z" />
+    </clipPath>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(47.34875,36.9925,-36.9925,47.34875,344.325,162.1875)"
+       x2="1"
+       id="a-3">
+      <stop
+         id="stop15-61"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop17-29"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop19-3"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop21-19"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop23-4"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+    <linearGradient
+       gradientUnits="userSpaceOnUse"
+       gradientTransform="matrix(14.322136,11.189559,-11.189559,14.322136,103.39117,-43.22521)"
+       x2="1"
+       id="f">
+      <stop
+         id="stop12"
+         offset="0"
+         stop-color="#0c542a" />
+      <stop
+         id="stop14"
+         offset=".21472"
+         stop-color="#0c542a" />
+      <stop
+         id="stop16"
+         offset=".57995"
+         stop-color="#25af64" />
+      <stop
+         id="stop18"
+         offset=".84663"
+         stop-color="#25af64" />
+      <stop
+         id="stop20"
+         offset="1"
+         stop-color="#25af64" />
+    </linearGradient>
+  </defs>
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.7"
+     inkscape:cx="94.827004"
+     inkscape:cy="267.59341"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1920"
+     inkscape:window-height="1012"
+     inkscape:window-x="1920"
+     inkscape:window-y="759"
+     inkscape:window-maximized="1"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     units="px" />
+  <metadata
+     id="metadata5">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(31.749994,-15.785728)">
+    <flowRoot
+       xml:space="preserve"
+       id="flowRoot308"
+       style="font-style:normal;font-weight:normal;font-size:37.33333206px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none"
+       transform="matrix(0.26458333,0,0,0.26458333,-34.517006,20.683034)"><flowRegion
+         id="flowRegion310"
+         style="font-size:37.33333206px;text-align:center;text-anchor:middle"><rect
+           id="rect312"
+           width="1031.3657"
+           height="101.01524"
+           x="135.36044"
+           y="417.76645"
+           style="font-size:37.33333206px;text-align:center;text-anchor:middle" /></flowRegion><flowPara
+         style="font-size:32px;text-align:center;text-anchor:middle"
+         id="flowPara902">Analysis of Chromosome Conformation Capture data (Hi-C)</flowPara></flowRoot>    <g
+       id="g603"
+       transform="matrix(0.44611981,0,0,0.44611981,44.334855,81.689003)">
+      <flowRoot
+         xml:space="preserve"
+         id="flowRoot1021"
+         style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:40px;line-height:1.25;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold';letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+         transform="matrix(1.9231376,0,0,1.9231376,-514.12361,-525.99533)"><flowRegion
+           id="flowRegion1023"
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'"><rect
+             id="rect1025"
+             width="275.99985"
+             height="102.85306"
+             x="274.76151"
+             y="267.25372"
+             style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'" /></flowRegion><flowPara
+           id="flowPara1027"
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'">hic</flowPara><flowPara
+           style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-family:'Maven Pro';-inkscape-font-specification:'Maven Pro Bold'"
+           id="flowPara982" /></flowRoot>    </g>
+    <g
+       id="g551"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         style="fill:#24af63;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path24"
+         d="m 401.03289,-44.148517 v 1.35913 c -0.0378,0 -0.0378,0 -0.0755,0.03775 l -0.67956,-0.566304 c -1.51015,-1.283623 -3.13355,-2.416231 -4.94572,-3.322317 -0.30203,-0.151014 -0.60406,-0.339782 -0.94384,-0.415289 -0.0378,-0.03775 -0.0755,-0.07551 -0.11326,-0.07551 -0.67957,-0.264275 -1.35913,-0.490796 -2.07645,-0.679564 -1.5479,-0.339783 -3.0958,-0.377536 -4.64369,-0.07551 -2.00094,0.41529 -3.77536,1.283623 -5.47428,2.416231 -1.66115,1.132607 -3.1713,2.453983 -4.56818,3.850866 -0.26428,0.264276 -0.26428,0.264276 -0.41529,-0.07551 -0.75507,-1.547897 -1.58565,-3.058041 -2.605,-4.454924 -0.79282,-1.057101 -1.66116,-2.038694 -2.79376,-2.718259 -1.13261,-0.717318 -2.37848,-0.981594 -3.69986,-0.641811 -1.6234,0.377536 -2.94478,1.359129 -4.19065,2.41623 -0.75507,0.566304 -1.43463,1.245869 -2.1142,1.88768 -0.64181,0.566304 -1.24587,1.132608 -1.88768,1.698912 -0.11326,0.11326 -0.18877,0.11326 -0.30203,0 -0.64181,-0.679565 -1.32137,-1.283623 -2.1142,-1.698912 -1.17036,-0.641811 -2.37847,-0.717319 -3.62434,-0.302029 -1.0571,0.339782 -1.96319,0.906086 -2.90703,1.510144 -0.37754,0.226521 -0.71732,0.490797 -1.09485,0.679565 v -0.07551 c 0.0378,-0.07551 0.0378,-0.151014 0.0378,-0.226521 0.0755,-1.661158 0.18877,-3.36007 0.45304,-5.021228 0.37754,-2.30297 0.94384,-4.530432 1.96319,-6.60688 0.75507,-1.547897 1.77442,-2.982534 3.05804,-4.152895 1.69891,-1.547898 3.69985,-2.529491 5.88956,-3.133549 2.37848,-0.679565 4.79471,-0.981593 7.24869,-1.094854 0.9816,-0.03775 1.92544,-0.07551 2.90703,-0.113261 0.4908,0.188768 0.98159,0.302029 1.47239,0.453043 0.71732,0.226522 1.43464,0.453043 2.15195,0.641811 0.37754,0.151015 0.71732,0.264276 1.09486,0.41529 1.39688,0.490797 2.75601,1.132608 3.92637,2.076448 0.30203,0.226521 0.60406,0.490796 0.90609,0.755072 -0.0755,-0.226522 -0.15102,-0.41529 -0.22652,-0.641811 -0.52855,-1.396884 -1.32138,-2.529491 -2.605,-3.322317 -0.52855,-0.339782 -1.09485,-0.566304 -1.66116,-0.868333 0.0378,0 0.11326,0 0.15102,-0.03775 1.69891,-0.302029 
3.43557,-0.453043 5.17224,-0.528551 1.47239,-0.07551 2.98253,-0.03775 4.45492,0.113261 1.73667,0.188768 3.43558,0.528551 5.05898,1.132608 2.41623,0.906086 4.49268,2.302969 6.11609,4.341664 1.39688,1.774419 2.30297,3.813113 2.86927,6.002821 0.5663,2.151955 0.79283,4.379418 0.86833,6.60688 -0.0378,0.792825 -0.0378,1.623404 -0.0378,2.453983 z"
+         class="st0" />
+      <path
+         style="fill:#ecdc86;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path26-8"
+         d="m 401.03289,-46.640254 h 0.0378 v 2.491737 h -0.0378 z"
+         class="st4" />
+      <path
+         style="fill:#a0918f;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path28"
+         d="m 387.25283,-82.468416 v 0.03775 h -1.69891 v -0.03775 z"
+         class="st5" />
+      <path
+         style="fill:#24af63;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path30"
+         d="m 388.34768,-11.340642 c 0.94384,-0.377536 1.84993,-0.868332 2.75601,-1.359129 1.0571,-0.641811 2.0387,-1.321376 3.02029,-2.038694 0.83058,-0.604058 1.62341,-1.245869 2.41623,-1.925434 l 1.35913,-1.132608 c 0.0755,-0.07551 0.0755,-0.03775 0.11326,0.03775 0.15102,0.604058 0.26428,1.245869 0.41529,1.88768 0.18877,0.868333 0.30203,1.736665 0.41529,2.604998 0.11326,1.019347 0.15102,2.000941 0.0755,3.020288 -0.0755,1.2458683 -0.30203,2.491737 -0.75507,3.6620985 -0.26428,0.7173183 -0.60406,1.396883 -1.01935,2.0386942 -0.52855,0.8305791 -1.13261,1.585651 -1.88768,2.2274621 -0.86833,0.7928255 -1.84993,1.47239026 -2.90703,2.0386942 -1.39688,0.79282552 -2.90702,1.3591295 -4.45492,1.8121726 -1.47239,0.4530432 -2.98254,0.7550719 -4.49268,1.0193471 -0.41529,0.075507 -0.83058,0.1510144 -1.24587,0.1510144 -0.86833,-0.037754 -1.73666,-0.2265216 -2.56724,-0.5285504 -1.24587,-0.4907967 -2.34073,-1.24586863 -3.58659,-1.81217257 -0.67957,-0.30202876 -1.35913,-0.52855034 -2.07645,-0.56630394 -0.94384,-0.0755072 -1.73667,0.26427518 -2.41623,0.90608631 l -1.47239,1.4723902 c -0.83058,0.8305792 -1.81218,1.3968831 -2.98254,1.5856511 -0.79282,0.1132608 -1.54789,0.075507 -2.34072,-0.037754 -1.09485,-0.1510144 -2.15195,-0.4152896 -3.1713,-0.7173183 -1.51015,-0.4907968 -2.90703,-1.1703615 -4.19065,-2.15195502 -1.20812,-0.9060863 -2.15196,-2.03869418 -2.94478,-3.32231648 -0.79283,-1.3213758 -1.24587,-2.7182589 -1.58565,-4.1906491 -0.15102,-0.6418112 -0.22652,-1.3213759 -0.30203,-1.963187 -0.11326,-0.9438399 -0.15102,-1.8499259 -0.11326,-2.7937659 0.0378,-1.321376 0.15101,-2.604998 0.33978,-3.926374 0.15101,0.151015 0.30203,0.264275 0.41529,0.377536 0.83058,0.755072 1.77442,1.434637 2.75601,2.038694 1.09486,0.641811 2.30297,1.170362 3.54884,1.510144 0.83058,0.226522 1.69891,0.377536 2.56724,0.41529 0.71732,0.07551 1.43464,0.07551 2.15196,0.03775 0.83058,-0.03775 1.69891,-0.11326 2.52949,-0.339782 0.11326,0 0.22652,0 0.30203,-0.03775 0.75507,-0.151014 1.51014,-0.339782 
2.26522,-0.566304 0.86833,-0.264275 1.69891,-0.52855 2.52949,-0.830579 0.79282,-0.302028 1.6234,-0.641811 2.41623,-0.981593 0.22652,-0.113261 0.37753,-0.07551 0.5663,0.07551 1.32138,1.019347 2.75601,1.88768 4.30391,2.529491 1.73667,0.679565 3.51108,1.019347 5.36101,0.868333 1.35913,-0.264276 2.64275,-0.604058 3.88862,-1.094855 z"
+         class="st0" />
+      <path
+         style="fill:#ecdc86;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path32-3"
+         d="m 388.34768,-11.340642 c -1.24587,0.490797 -2.52949,0.830579 -3.85087,0.94384 -1.84992,0.151015 -3.62434,-0.188768 -5.36101,-0.868332 -1.54789,-0.604058 -2.98253,-1.472391 -4.30391,-2.529491 -0.18876,-0.151015 -0.33978,-0.188768 -0.5663,-0.07551 -0.79283,0.339783 -1.58565,0.679565 -2.41623,0.981594 -0.83058,0.302029 -1.66116,0.604057 -2.52949,0.830579 -0.75507,0.226522 -1.51014,0.377536 -2.26522,0.566304 -0.11326,0.03775 -0.22652,0.03775 -0.30203,0.03775 0.26428,-0.302029 0.52855,-0.604058 0.79283,-0.906087 1.0571,-1.208115 1.81217,-2.604998 2.22746,-4.190649 0.60406,-2.114201 1.20812,-4.266156 1.73667,-6.418111 0.37753,-1.585651 0.67956,-3.171302 0.90608,-4.794707 0.15102,-1.170361 0.37754,-5.625286 0.30203,-6.682386 -0.18877,-3.24681 -0.90608,-6.342605 -2.22746,-9.325139 -0.79283,-1.774419 -2.15195,-2.982534 -4.03963,-3.47333 -0.83058,-0.226522 -1.66116,-0.151015 -2.45399,0.11326 -0.0755,0.03775 -0.11326,0.07551 -0.18877,0.03775 1.24587,-1.057101 2.52949,-2.038694 4.19065,-2.41623 1.32138,-0.302029 2.56725,-0.07551 3.69986,0.641811 1.1326,0.717318 2.00094,1.661158 2.79376,2.718259 1.01935,1.396883 1.84993,2.907027 2.605,4.454924 0.15101,0.339783 0.15101,0.339783 0.41529,0.07551 1.39688,-1.434636 2.90703,-2.756012 4.56818,-3.850866 1.66116,-1.132608 3.47334,-2.000941 5.47428,-2.416231 1.54789,-0.302028 3.09579,-0.264275 4.64369,0.07551 0.71732,0.151014 1.39688,0.377536 2.07645,0.679564 0.0378,0.03775 0.11326,0.03775 0.11326,0.07551 -2.00094,0.03775 -3.69986,0.792825 -5.24775,1.963187 -0.75508,0.566303 -1.43464,1.208115 -1.96319,2.00094 -0.41529,0.641811 -0.79283,1.35913 -1.09485,2.076448 -0.67957,1.434637 -1.24587,2.94478 -1.66116,4.492678 -0.33978,1.321376 -0.56631,2.680505 -0.67957,4.039635 -0.0755,1.0571 -0.11326,2.114201 -0.0755,3.171302 0.0377,1.283622 0.18876,2.567244 0.33978,3.88862 0.26427,2.151955 0.64181,4.30391 1.01934,6.455865 0.18877,1.170361 0.33979,2.378477 0.56631,3.586592 0.26427,1.736665 1.24587,3.020287 2.64275,4.001881 0,-0.03775 
0.0755,0 0.11326,0.03775 z"
+         class="st4" />
+      <path
+         style="fill:#3f2b29;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path34-7"
+         d="m 385.55392,-82.430663 h 1.69891 c 1.35913,0.07551 2.71826,0.302029 4.00188,0.755072 1.01935,0.339782 1.24587,1.019347 0.64181,1.88768 -0.41529,0.604057 -1.01935,1.057101 -1.66116,1.47239 -0.79282,0.52855 -1.66116,0.981594 -2.605,1.321376 -0.94384,0.377536 -1.84992,0 -2.45398,-0.94384 -0.18877,-0.302029 -0.33978,-0.604058 -0.41529,-0.94384 -0.0378,-0.113261 -0.0755,-0.151014 -0.18877,-0.151014 -2.1142,-0.377536 -4.00188,0.113261 -5.54978,1.623404 -1.28362,1.208115 -2.03869,2.756013 -2.56724,4.417171 -0.4908,1.510144 -0.71732,3.020288 -0.79283,4.605939 -0.0755,1.396883 0.0378,2.793766 0.22652,4.152895 0.0378,0.226522 0.11327,0.453044 0.11327,0.717319 0.0378,0.302028 -0.0755,0.566304 -0.30203,0.717318 -0.26428,0.188768 -0.56631,0.151014 -0.86834,0.151014 -0.71731,-0.226521 -1.43463,-0.453043 -2.15195,-0.641811 v -0.490796 c 0,-0.755072 0,-1.472391 0.0378,-2.227463 0.15102,-2.907027 0.60406,-5.7763 1.73667,-8.494559 0.83058,-2.038694 2.03869,-3.813113 3.7376,-5.209996 1.39689,-1.170361 2.98254,-1.925433 4.75696,-2.340723 0.90608,-0.226522 1.73666,-0.339782 2.605,-0.377536 z"
+         class="st6" />
+      <path
+         style="fill:#396e35;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path36"
+         d="m 374.86965,-62.685532 c 0.30203,0 0.60406,0.03775 0.86833,-0.151015 0.26428,-0.188768 0.33978,-0.453043 0.30203,-0.717318 -0.0378,-0.226521 -0.0755,-0.490797 -0.11326,-0.717318 0.15101,0 0.26427,-0.03775 0.41529,-0.03775 0.52855,0.302029 1.09485,0.566304 1.66116,0.868333 1.28362,0.792825 2.07644,1.925433 2.60499,3.322316 0.0755,0.226522 0.15102,0.41529 0.22653,0.641811 -0.30203,-0.264275 -0.60406,-0.52855 -0.90609,-0.755071 -1.20812,-0.906087 -2.52949,-1.547898 -3.92637,-2.076448 -0.41529,-0.113261 -0.75508,-0.226522 -1.13261,-0.377536 z"
+         class="st7" />
+      <path
+         style="fill:#396e35;stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path38-9"
+         d="m 372.71769,-63.81814 v 0.490797 c -0.49079,-0.151015 -0.98159,-0.264275 -1.47239,-0.453043 0.52855,-0.03775 1.01935,-0.03775 1.47239,-0.03775 z"
+         class="st7" />
+    </g>
+    <g
+       id="g596"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         d="m 150.58729,-13.861192 q -5.8632,0 -10.61714,-2.29774 -4.75394,-2.29774 -7.60631,-6.89322 -2.77314,-4.674713 -2.77314,-11.330235 0,-10.696376 5.70474,-16.163413 5.70473,-5.546269 15.21262,-5.546269 3.32776,0 6.73476,0.713092 3.40699,0.633859 6.02166,1.822345 v 10.141749 q -3.24853,-1.426183 -5.78397,-2.139275 -2.53543,-0.792324 -5.07087,-0.792324 -5.22934,0 -8.16094,2.61467 -2.9316,2.535437 -2.9316,8.002474 0,6.100896 2.6939,9.032495 2.77314,2.931599 8.95327,2.931599 4.67471,0 10.37944,-3.169296 v 10.062516 q -2.85236,1.505416 -5.94243,2.218508 -3.01083,0.792324 -6.81399,0.792324 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path569"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 189.64516,-13.702727 q -10.14175,0 -15.21262,-5.387804 -5.07088,-5.387805 -5.07088,-16.004948 0,-11.092538 4.99164,-16.163413 5.07088,-5.070874 15.29186,-5.070874 10.22098,0 15.37109,5.229339 5.15011,5.150107 5.15011,16.004948 0,10.458679 -5.30858,15.925715 -5.30857,5.467037 -15.21262,5.467037 z m 0,-9.428657 q 4.35778,0 6.57629,-3.010832 2.21851,-3.090064 2.21851,-8.953263 0,-6.417826 -2.21851,-9.111728 -2.13927,-2.773134 -6.57629,-2.773134 -4.59548,0 -6.65552,2.773134 -1.98081,2.693902 -1.98081,9.111728 0,6.100896 2.06004,9.032495 2.13927,2.9316 6.57629,2.9316 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path571"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 218.31492,-50.783497 q 1.18848,-1.030021 4.12008,-2.29774 3.01084,-1.267719 6.65553,-2.139275 3.72392,-0.950789 7.13091,-0.950789 6.57629,0 9.11173,1.98081 v 8.636333 q -3.1693,-0.713091 -9.11173,-0.713091 -3.72392,0 -6.10089,0.396162 v 30.979874 h -11.80563 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path573"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 268.56064,-56.329766 q 18.77809,0 18.54039,24.482816 H 260.6374 q 1.50542,8.319403 11.17177,8.319403 2.9316,0 5.70474,-0.792324 2.85236,-0.871556 6.02166,-2.218507 v 10.141749 q -6.49706,2.693902 -14.57876,2.693902 -6.1009,0 -10.69638,-2.456205 -4.59548,-2.535437 -7.13092,-7.21015 -2.53543,-4.674712 -2.53543,-11.013305 0,-10.458679 5.1501,-16.163413 5.22934,-5.783966 14.81646,-5.783966 z m -0.47539,9.032495 q -6.33859,0 -7.52708,7.923242 h 14.81646 q -0.55462,-4.278551 -2.29774,-6.100896 -1.66388,-1.822346 -4.99164,-1.822346 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path575"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 318.12052,-74.790919 h 8.47787 l -29.15753,67.6644822 h -8.39863 z"
+         style="font-weight:bold;font-size:medium;line-height:0%;font-family:'Maven Pro';fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path577"
+         inkscape:connector-curvature="0" />
+    </g>
+    <g
+       id="g589"
+       transform="matrix(0.44611981,0,0,0.44611981,44.677261,81.689003)">
+      <path
+         d="m 15.436598,-51.575821 q 3.090064,-1.663881 9.428657,-3.090064 6.338593,-1.426184 11.330235,-1.426184 8.477868,0 12.756419,3.327762 4.27855,3.327761 4.27855,10.934073 v 26.939021 h -11.80563 v -24.562049 q 0,-6.89322 -7.764776,-6.89322 -1.743113,0 -3.565459,0.396162 -1.822345,0.396162 -2.852367,0.950789 v 30.108318 H 15.436598 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path559"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 66.729683,-45.95032 h -5.863199 v -9.032495 h 5.942431 v -0.950789 q 0.07923,-9.032495 4.120086,-12.518721 4.120085,-3.565459 11.17177,-3.565459 1.267719,0 2.773134,0.237697 1.505416,0.237697 2.456205,0.554627 v 10.379446 q -0.871556,-0.871556 -1.743113,-1.267718 -0.871556,-0.475395 -2.218507,-0.475395 -2.218508,0 -3.486227,1.188486 -1.267718,1.188487 -1.267718,4.040854 v 2.376972 h 8.715565 v 9.032495 h -8.794798 v 31.059107 H 66.729683 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path561"
+         inkscape:connector-curvature="0" />
+      <path
+         d="m 96.008535,-43.256418 h 24.165885 v 8.160939 H 96.008535 Z"
+         style="font-weight:bold;font-size:79.23241425px;line-height:0%;font-family:'Maven Pro';fill:#24af63;fill-opacity:1;stroke:none;stroke-width:0.37753597"
+         id="path563"
+         inkscape:connector-curvature="0" />
+      <path
+         style="display:inline;fill:url(#f);stroke-width:0.37753597"
+         inkscape:connector-curvature="0"
+         id="path67"
+         d="m 105.17203,-43.255454 -8.129199,8.160957 h 23.064239 v -8.160957 z" />
+    </g>
+  </g>
+</svg>
diff --git a/bin/build_matrix b/bin/build_matrix
new file mode 100755
index 0000000000000000000000000000000000000000..c61c6176c46edf71a6be8dcd3d090c0c1a0b9c4a
Binary files /dev/null and b/bin/build_matrix differ
diff --git a/bin/cutsite_trimming b/bin/cutsite_trimming
new file mode 100755
index 0000000000000000000000000000000000000000..aef62c5802acbb650dc7f60040cfde09f1e9d57f
Binary files /dev/null and b/bin/cutsite_trimming differ
diff --git a/bin/digest_genome.py b/bin/digest_genome.py
new file mode 100755
index 0000000000000000000000000000000000000000..2c29a49e1cf174f12142f78627fd799b83da2788
--- /dev/null
+++ b/bin/digest_genome.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nelle Varoquaux, Nicolas Servant
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to extract restriction fragment from a fasta file and output a BED file
+"""
+
+import argparse
+import re
+import os
+import sys
+import numpy as np
+
+RE_cutsite = {
+    "mboi": ["^GATC"],
+    "dpnii": ["^GATC"],
+    "bglii": ["A^GATCT"],
+    "hindiii": ["A^AGCTT"]}
+
+
+def find_re_sites(filename, sequences, offset):
+    with open(filename, 'r') as infile:
+        chr_id = None
+        big_str = ""
+        indices = []
+        all_indices = []
+        contig_names = []
+        c = 0
+        for line in infile:
+            c += 1
+            if line.startswith(">"):
+                print("{}...".format(line.split()[0][1:]))
+                # If this is not the first chromosome, find the indices and append
+                # them to the list
+                if chr_id is not None:
+                     for rs in range(len(sequences)):
+                         pattern = "(?={})".format(sequences[rs].lower())
+                         indices += [m.start() + offset[rs]\
+                         for m in re.finditer(pattern, big_str)]
+                     indices.sort()
+                     all_indices.append(indices)
+                     indices = []
+
+                # This is a new chromosome. Empty the sequence string, and add the
+                # correct chrom id
+                big_str = ""
+                chr_id = line.split()[0][1:]
+                if chr_id in contig_names:
+                    print("The fasta file contains several instance of {}. Exit.".format(chr_id))
+                    sys.exit(-1)
+                contig_names.append(chr_id)
+            else:
+                # As long as we don't change chromosomes, continue reading the
+                # file, and appending the sequences
+                big_str += line.lower().strip()
+        # Add the indices for the last chromosome
+        for rs in range(len(sequences)):
+            pattern = "(?={})".format(sequences[rs].lower())
+            indices += [m.start() + offset[rs]
+                        for m in re.finditer(pattern, big_str)]
+        indices.sort()
+        all_indices.append(indices)
+    
+    return contig_names, all_indices
+
+
+def find_chromsomose_lengths(reference_filename):
+    chromosome_lengths = []
+    chromosome_names = []
+    length = None
+    with open(reference_filename, 'r') as infile:
+        for line in infile:
+            if line.startswith(">"):
+                chromosome_names.append(line[1:].strip())
+                if length is not None:
+                    chromosome_lengths.append(length)
+                length = 0
+            else:
+                length += len(line.strip())
+        chromosome_lengths.append(length)
+    return chromosome_names, np.array(chromosome_lengths)
+
+
+def replaceN(cs):
+    npos = int(cs.find('N'))
+    cseql = []
+    if npos != -1:
+        for nuc in ["A","C","G","T"]:
+            tmp = cs.replace('N', nuc, 1)
+            tmpl = replaceN(tmp)
+            if type(tmpl) == list:
+                cseql = cseql + tmpl
+            else:
+                cseql.append(tmpl)
+    else:
+        cseql.append(cs)
+    return cseql
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('fastafile')
+    parser.add_argument('-r', '--restriction_sites',
+                        dest='res_sites',
+                        nargs='+',
+                        help=("The cutting position has to be specified using "
+                              "'^'. For instance, -r A^AGCTT for HindIII "
+                              "digestion. Several restriction enzyme can be "
+                              "specified."))
+    parser.add_argument('-o', '--out', default=None)
+    args = parser.parse_args()
+
+    filename = args.fastafile
+    out = args.out
+    
+    # Split restriction sites if comma-separated
+    cutsites=[]
+    for s in args.res_sites:
+        for m in s.split(','):
+            cutsites.append(m)
+                
+    # process args and get restriction enzyme sequences
+    sequences = []
+    offset = []
+    for cs in cutsites:
+        if cs.lower() in RE_cutsite:
+            cseq = ''.join(RE_cutsite[cs.lower()])
+        else:
+            cseq = cs
+
+        offpos = int(cseq.find('^'))
+        if offpos == -1:
+            print("Unable to detect offset for {}. Please, use '^' to specify the cutting position,\
+                   i.e A^GATCT for HindIII digestion.".format(cseq))
+            sys.exit(-1)
+
+        for nuc in list(set(cs)):
+            if nuc not in ['A','T','G','C','N','^']:
+                print("Find unexpected character ['{}']in restriction motif".format(nuc))
+                print("Note that multiple motifs should be separated by a space (not a comma !)")
+
+                sys.exit(-1)
+
+        offset.append(offpos)
+        sequences.append(re.sub('\^', '', cseq))
+
+    # replace all N in restriction motif
+    sequences_without_N = []
+    offset_without_N = []
+    for rs in range(len(sequences)):
+        nrs = replaceN(sequences[rs])
+        sequences_without_N = sequences_without_N + nrs
+        offset_without_N = offset_without_N + [offset[rs]] * len(nrs)
+          
+    sequences = sequences_without_N
+    offset = offset_without_N
+    
+    if out is None:
+        out = os.path.splitext(filename)[0] + "_fragments.bed"
+
+    print("Analyzing", filename)
+    print("Restriction site(s)", ",".join(sequences))
+    print("Offset(s)",  ','.join(str(x) for x in offset))
+
+    # Read fasta file and look for rs per chromosome
+    contig_names, all_indices = find_re_sites(filename, sequences,  offset=offset)
+    _, lengths = find_chromsomose_lengths(filename)
+
+    valid_fragments = []
+    for i, indices in enumerate(all_indices):
+        valid_fragments_chr = np.concatenate(
+            [np.concatenate([[0], indices])[:, np.newaxis],
+             np.concatenate([indices, [lengths[i]]])[:, np.newaxis]],
+            axis=1)
+        valid_fragments.append(valid_fragments_chr)
+
+    # Write results
+    print("Writing to {} ...".format(out))
+    with open(out, 'w') as outfile:
+        for chrom_name, indices in zip(contig_names, valid_fragments):
+            frag_id = 0
+            for begin, end in indices:
+                # allow to remove cases where the enzyme cut at
+                # the first position of the chromosome
+                if end > begin:
+                    frag_id += 1
+                    frag_name = "HIC_{}_{}".format(str(chrom_name), int(frag_id))
+                    outfile.write("{}\t{}\t{}\t{}\t0\t+\n".format(str(chrom_name), int(begin), int(end), str(frag_name)))
diff --git a/bin/hicpro2higlass.sh b/bin/hicpro2higlass.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ff11aeeb538bbfb06acead2d22646c93d8a567bf
--- /dev/null
+++ b/bin/hicpro2higlass.sh
@@ -0,0 +1,268 @@
+#!/bin/bash
+
+## HiC-Pro           
+## Copyleft 2017 Institut Curie
+## Author(s): Nicolas Servant
+## Contact: nicolas.servant@curie.fr
+## This software is distributed without any guarantee under the terms of the BSD licence
+
+##
+## First version of converter between HiCPro and higlass.
+## The cooler python package should be properly installed, as well as the higlass software
+##
+
+##
+## A few notes about higlass
+##
+## docker run will install the docker image and start it
+## sudo docker run --detach  --publish 8888:80  --volume ~/hg-data:/data --volume ~/hg-tmp:/tmp --name higlass-container  gehlenborglab/higlass
+## sudo docker start higlass-container
+## sudo docker ps -all
+##
+## Once higlass is installed, you can just run it using
+## sudo docker start higlass-container
+## higlass will then be available at http://localhost:8888
+##
+
+###########################
+## trap handler
+###########################
+function trap_error()
+{    
+    echo "Error: $1 - line $2 - exit status of last command: $?. Exit" >&2
+    exit 1
+}
+
+function trap_exit()
+{
+    ##Since bash-4.0 $LINENO is reset to 1 when the trap is triggered
+    if [ "$?" != "0" ]; then
+	echo "Error: exit status detected. Exit." >&2
+    fi
+
+    if [[ ! -z ${tmp_dir} && -e ${tmp_dir} ]]; then 
+	echo -e "Cleaning temporary folders ..." >&2
+	/bin/rm -rf ${tmp_dir}
+    fi
+}
+
+trap 'trap_error "$0" "$LINENO"' ERR
+trap 'trap_exit' 0 1 2 3
+
+set -E ## export trap to functions
+set -o pipefail  ## trace ERR through pipes         
+
+## 0 =
+## 1 >
+## 2 <
+vercomp () {
+    if [[ $1 == $2 ]]
+    then
+        return 0
+    fi
+    local IFS=.
+    local i ver1=($1) ver2=($2)
+    # fill empty fields in ver1 with zeros
+    for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
+    do
+        ver1[i]=0
+    done
+
+    for ((i=0; i<${#ver1[@]}; i++))
+    do
+        if [[ -z ${ver2[i]} ]]
+        then
+            # fill empty fields in ver2 with zeros
+            ver2[i]=0
+        fi
+        if ((10#${ver1[i]} > 10#${ver2[i]}))
+        then
+            echo 1
+        fi
+        if ((10#${ver1[i]} < 10#${ver2[i]}))
+        then
+            echo 2
+        fi
+    done
+    echo 0
+}
+
+function usage {
+    echo -e "usage : hicpro2higlass -i INPUT -r RESOLUTION -c CHROMSIZE [-n] [-o ODIR] [-t TEMP]  [-h]"
+    echo -e "Use option -h|--help for more information"
+}
+
+function help {
+    usage;
+    echo 
+    echo "Generate Higlass input file from HiC-Pro results"
+    echo "See https://github.com/hms-dbmi/higlass-website for details about Higlass"
+    echo "---------------"
+    echo "OPTIONS"
+    echo
+    echo "   -i|--input INPUT : allValidPairs or matrix file generated by HiC-Pro"
+    echo "   -r|--res RESOLUTION : .matrix file resolution or maximum resolution to reach from the .allValidPairs input file"
+    echo "   -c|--chrom CHROMSIZE : chromosome size file"
+    echo "   -p|--proc NB_CPU : number of CPUs for cooler"
+    echo "   [-n|--norm] : run cooler matrix balancing algorithm"
+    echo "   [-o|--out] : output path. Default is current path"
+    echo "   [-t|--temp] TEMP : path to tmp folder. Default is current path"
+    echo "   [-h|--help]: help"
+    exit;
+}
+
+
+if [ $# -lt 1 ]
+then
+    usage
+    exit
+fi
+
+# Transform long options to short ones
+for arg in "$@"; do
+  shift
+  case "$arg" in
+      "--input") set -- "$@" "-i" ;;
+      "--bed")   set -- "$@" "-b" ;;
+      "--res")   set -- "$@" "-r" ;;
+      "--chrom") set -- "$@" "-c" ;;
+      "--proc") set -- "$@" "-p" ;;
+      "--out") set -- "$@" "-o" ;;
+      "--temp") set -- "$@" "-t" ;;
+      "--norm")   set -- "$@" "-n" ;;
+      "--help")   set -- "$@" "-h" ;;
+       *)        set -- "$@" "$arg"
+  esac
+done
+
+INPUT_HICPRO=""
+INPUT_BED=""
+NORMALIZE=0
+NPROC=1
+CHROMSIZES_FILE=""
+RES=10000
+OUT="./"
+TEMP="./"
+
+while getopts ":i:b:c:p:r:o:t:nh" OPT
+do
+    case $OPT in
+	i) INPUT_HICPRO=$OPTARG;;
+	b) INPUT_BED=$OPTARG;;
+	n) NORMALIZE=1;;
+	c) CHROMSIZES_FILE=$OPTARG;;
+	p) NPROC=$OPTARG;;
+	r) RES=$OPTARG;;
+	o) OUT=$OPTARG;;
+	t) TEMP=$OPTARG;;
+	h) help ;;
+	\?)
+	    echo "Invalid option: -$OPTARG" >&2
+	    usage
+	    exit 1
+	    ;;
+	:)
+	    echo "Option -$OPTARG requires an argument." >&2
+	    usage
+	    exit 1
+	    ;;
+    esac
+done
+
+if [[ -z $INPUT_HICPRO ]];
+then
+    usage
+    exit
+fi
+
+if [[ ! -e $CHROMSIZES_FILE ]]; then
+    echo -e "$CHROMSIZES_FILE file not found. Exit"
+    exit 1
+fi
+
+## Detect input data type
+DATATYPE=""
+if [[ $INPUT_HICPRO == *.mat* ]]; then
+    DATATYPE="MATRIX"
+elif [[ $INPUT_HICPRO == *allValidPairs* || $INPUT_HICPRO == *validPairs* ]]; then
+    DATATYPE="VALID"
+else
+    echo -e "Unknown input data type. Expect .matrix or _allValidPairs input files."
+    exit 1
+fi
+echo -e "$DATATYPE input file detected ..."
+
+## Check cooler version
+which cooler > /dev/null;
+if [ $? != "0" ]; then
+    echo -e "Cooler is not installed or is not in your $PATH. See https://github.com/mirnylab/cooler for details."
+    exit 1;
+fi
+
+COOLER_VERSION=$(cooler --version 2>&1 | awk '{print $NF}')
+echo "Cooler version $COOLER_VERSION detected ..."
+cres=$(vercomp ${COOLER_VERSION} "0.7.6")
+if [[ $cres == "2" ]]; then
+    echo "Cooler version must be >= 0.7.6 ! Stop."
+    exit 1
+fi
+
+if [[ $DATATYPE == "VALID" ]]; then
+    which pairix > /dev/null;
+    if [ $? != "0" ]; then
+	echo -e "Pairix is not installed or is not in your PATH. See https://github.com/4dn-dcic/pairix."
+	exit 1;
+    fi
+fi
+
+echo -e "\nGenerating .cool files ..."
+tmp_dir=${TEMP}/_tmp$$
+mkdir -p $tmp_dir
+
+if [[ $DATATYPE == "MATRIX" ]]; then
+    out=$(basename $INPUT_HICPRO | sed -e 's/.mat.*/.cool/')
+    
+    cooler makebins $CHROMSIZES_FILE $RES > $tmp_dir/bins.bed
+    cooler load -f coo --one-based $tmp_dir/bins.bed $INPUT_HICPRO $tmp_dir/$out
+
+    echo -e "\nZoomify .cool file ..."
+    if [[ $NORMALIZE == 1 ]]; then
+	cooler zoomify --nproc ${NPROC} --balance $tmp_dir/$out
+    else
+	cooler zoomify --nproc ${NPROC} $tmp_dir/$out
+    fi
+    out=$(basename $INPUT_HICPRO | sed -e 's/.mat.*/.mcool/')
+    
+elif [[ $DATATYPE == "VALID" ]]; then
+    out=$(basename $INPUT_HICPRO | sed -e 's/.allValidPairs.*/.cool/')
+
+    awk '{OFS="\t";print $2,$3,$4,$5,$6,$7,1}' $INPUT_HICPRO | sed -e 's/+/1/g' -e 's/-/16/g' > $tmp_dir/contacts.txt
+    cooler csort --nproc ${NPROC} -c1 1 -p1 2 -s1 3 -c2 4 -p2 5 -s2 6 \
+	   -o $tmp_dir/contacts.sorted.txt.gz  \
+	   $tmp_dir/contacts.txt \
+	   $CHROMSIZES_FILE
+    
+    cooler makebins $CHROMSIZES_FILE $RES > $tmp_dir/bins.bed
+    cooler cload pairix --nproc ${NPROC} $tmp_dir/bins.bed $tmp_dir/contacts.sorted.txt.gz $tmp_dir/$out
+
+    echo -e "\nZoomify .cool file ..."
+    if [[ $NORMALIZE == 1 ]]; then
+	cooler zoomify --nproc ${NPROC} --balance $tmp_dir/$out
+    else
+	cooler zoomify --nproc ${NPROC} $tmp_dir/$out
+    fi
+    out=$(basename $INPUT_HICPRO | sed -e 's/.allValidPairs.*/.mcool/')
+fi
+
+## mv to out
+mv $tmp_dir/*cool ${OUT}/
+
+## clean
+/bin/rm -rf $tmp_dir
+
+echo -e "\nCooler file generated with success ..."
+echo "Please copy the file $out in your Higlass input directory and run :"
+echo "sudo docker exec higlass-container python higlass-server/manage.py  ingest_tileset --filename /tmp/$out --datatype matrix --filetype cooler" 
+
+
+
diff --git a/bin/mapped_2hic_dnase.py b/bin/mapped_2hic_dnase.py
new file mode 100755
index 0000000000000000000000000000000000000000..dd023b0023e0c0a7aa4780bcc04289e467ed877b
--- /dev/null
+++ b/bin/mapped_2hic_dnase.py
@@ -0,0 +1,455 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nicolas Servant, Eric Viara
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to keep only valid pairs when no restriction enzyme are used (i.e. DNAse or Micro-HiC)
+"""
+
+import getopt
+import sys
+import os
+import re
+import pysam
+
+
+def usage():
+    """Usage function"""
+    print("Usage : python mapped_2hic_dnase.py")
+    print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
+    print("[-o/--outputDir] <Output directory. Default is current directory>")
+    print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
+    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
+    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
+    return
+
+
+def get_args():
+    """Get argument"""
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "r:o:d:g:avh",
+            ["mappedReadsFile=",
+             "outputDir=", "minDist=", "gatg", "all", "verbose", "help"])
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    return opts
+
+
+def get_read_strand(read):
+    """
+    Conversion of read position to naive strand representation
+
+    Parameters
+    ----------
+    read : list
+        list of aligned reads
+    """
+    strand = "+"
+    if read.is_reverse:
+        strand = "-"
+    return strand
+
+
+def get_read_pos(read, st="start"):
+    """
+    Return the read position (zero-based) used for the intersection with
+    the restriction fragment
+
+    The 5' end is not a good choice for the reverse reads (which contain part
+    of the restriction site, and thus overlap the next restriction fragment)
+    Using the left-most position (5' for forward, 3' for reverse) or the
+    middle of the read should work but the middle of the reads might be more
+    safe
+
+    Parameters
+    -----------
+    read : list
+        list of aligned reads
+    """
+    if st == "middle":
+        pos = read.reference_start + int(read.alen/2)
+    elif st =="start":
+        pos = get_read_start(read)
+    elif st == "left":
+        pos = read.reference_start
+
+    return pos
+
+
+def get_read_start(read):
+    """                                                                                                                                                                                                        
+    Return the 5' end of the read                                                                                                                                                                              
+    """
+    if read.is_reverse:
+        pos = read.reference_start + read.alen -1
+    else:
+        pos = read.reference_start
+    return pos
+
+
+def get_ordered_reads(read1, read2):
+    """
+    Reorient reads
+
+    The sequencing is usually not oriented. Reorient the reads so that r1 is
+    always before r2
+
+    read1 = [AlignedRead]
+    read2 = [AlignedRead]
+    """
+    if read1.reference_id == read2.reference_id:
+        if get_read_pos(read1) < get_read_pos(read2):
+            r1, r2 = read1, read2
+        else:
+            r1, r2 = read2, read1
+    else:
+        if read1.reference_id < read2.reference_id:
+            r1, r2 = read1, read2
+        else:
+            r1, r2 = read2, read1
+
+    return r1, r2
+
+
+def isIntraChrom(read1, read2):
+    """
+    Return true if the read pair is intrachromosomal
+    
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    if read1.reference_id == read2.reference_id:
+        return True
+    else:
+        return False
+
+
+def get_valid_orientation(read1, read2):
+    """
+    Both reads are expected to be on the different restriction fragments
+
+    Check the orientation of reads ->-> / <-<- / -><- / <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+
+    direction = None
+    if get_read_strand(r1) == "+" and get_read_strand(r2) == "+":
+        direction = "FF"
+    elif get_read_strand(r1) == "-" and get_read_strand(r2) == "-":
+        direction = "RR"
+    elif get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        direction = "FR"
+    elif get_read_strand(r1) == "-" and get_read_strand(r2) == "+":
+        direction = "RF"
+
+    return direction
+
+
+def get_cis_dist(read1, read2):
+     """
+     Calculate the size of the DNA fragment library
+
+     read1 : [AlignedRead]
+     read2 : [AlignedRead]
+
+     """
+     # Get oriented reads
+     ##r1, r2 = get_ordered_reads(read1, read2)
+     dist = None
+     if not read1.is_unmapped and not read2.is_unmapped:
+         ## Contact distances can be calculated for intrachromosomal reads only
+         if isIntraChrom(read1, read2):
+             r1pos = get_read_pos(read1)
+             r2pos = get_read_pos(read2)
+             dist = abs(r1pos - r2pos)
+     return dist
+
+
+def get_read_tag(read, tag):
+    for t in read.get_tags():
+        if t[0] == tag:
+            return t[1]
+    return None
+
+
+if __name__ == "__main__":
+    # Read command line arguments
+    opts = get_args()
+    verbose = False
+    allOutput = False
+    minInsertSize = None
+    maxInsertSize = None
+    minDist = None
+    outputDir = "."
+    gtag = None
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-r", "--mappedReadsFile"):
+            mappedReadsFile = arg
+        elif opt in ("-o", "--outputDir"):
+            outputDir = arg
+        elif opt in ("-d", "--minCisDist"):
+            minDist = arg
+        elif opt in ("-g", "--gtag"):
+            gtag = arg
+        elif opt in ("-a", "--all"):
+            allOutput = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
+
+    # Verbose mode
+    if verbose:
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## minCisDist=", minDist)
+        print("## allOuput=", allOutput)
+        print("## verbose={}\n".format(verbose))
+
+    # Initialize variables
+    reads_counter = 0
+    valid_counter = 0
+    valid_counter_FF = 0
+    valid_counter_RR = 0
+    valid_counter_FR = 0
+    valid_counter_RF = 0
+    single_counter = 0
+    dump_counter = 0
+    filt_counter = 0
+
+    # AS counter
+    G1G1_ascounter = 0
+    G2G2_ascounter = 0
+    G1U_ascounter = 0
+    UG1_ascounter = 0
+    G2U_ascounter = 0
+    UG2_ascounter = 0
+    G1G2_ascounter = 0
+    G2G1_ascounter = 0
+    UU_ascounter = 0
+    CF_ascounter = 0
+
+    baseReadsFile = os.path.basename(mappedReadsFile)
+    baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
+
+    # Open handlers for output files
+    handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
+
+    if allOutput:
+        handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
+        handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs','w')
+        handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs','w')
+
+    # Read the SAM/BAM file
+    if verbose:
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
+    samfile = pysam.Samfile(mappedReadsFile, "rb")
+
+    # Reads are 0-based too (for both SAM and BAM format)
+    # Loop on all reads
+    for read in samfile.fetch(until_eof=True):
+        reads_counter += 1
+        cur_handler = None
+        interactionType = None
+        htag = ""
+
+        # First mate
+        if read.is_read1:
+            r1 = read
+            if not r1.is_unmapped:
+                r1_chrom = samfile.get_reference_name(r1.reference_id)
+            else:
+                r1_chrom = None
+
+        # Second mate
+        elif read.is_read2:
+            r2 = read
+            if not r2.is_unmapped:
+                r2_chrom = samfile.get_reference_name(r2.reference_id)
+            else:
+                r2_chrom = None
+
+            if isIntraChrom(r1, r2):
+                dist = get_cis_dist(r1, r2)
+            else:
+                dist = None
+
+            # Check singleton
+            if r1.is_unmapped or r2.is_unmapped:
+                interactionType = "SI"
+                single_counter += 1
+                cur_handler = handle_single if allOutput else None
+
+            # Check Distance criteria - Filter
+            if (minDist is not None and dist is not None and dist < int(minDist)):
+                interactionType = "FILT"
+                filt_counter += 1
+                cur_handler = handle_filt if allOutput else None
+
+            # By default pair is valid
+            if interactionType == None:
+                interactionType = "VI"
+                valid_counter += 1
+                cur_handler = handle_valid
+                validType = get_valid_orientation(r1, r2)
+                if validType == "RR":
+                    valid_counter_RR += 1
+                elif validType == "FF":
+                    valid_counter_FF += 1
+                elif validType == "FR":
+                    valid_counter_FR += 1
+                elif validType == "RF":
+                    valid_counter_RF += 1
+                else:
+                    interactionType = "DUMP"
+                    dump_counter += 1
+                    cur_handler = handle_dump if allOutput else None
+
+
+
+            # Split valid pairs based on XA tag
+            if gtag is not None:
+                r1as = get_read_tag(r1, gtag)
+                r2as = get_read_tag(r2, gtag)
+                        
+                if r1as == 1 and r2as == 1:
+                    G1G1_ascounter += 1
+                elif r1as == 2 and r2as == 2:
+                    G2G2_ascounter += 1
+                elif r1as == 1 and r2as == 0:
+                    G1U_ascounter += 1
+                elif r1as == 0 and r2as == 1:
+                    UG1_ascounter += 1
+                elif r1as == 2 and r2as == 0:
+                    G2U_ascounter += 1
+                elif r1as == 0 and r2as == 2:
+                    UG2_ascounter += 1
+                elif r1as == 1 and r2as == 2:
+                    G1G2_ascounter += 1
+                elif r1as == 2 and r2as == 1:
+                    G2G1_ascounter += 1
+                elif r1as == 3 or r2as == 3:
+                    CF_ascounter += 1
+                else:
+                    UU_ascounter += 1
+                        
+       
+            if cur_handler is not None:
+                if not r1.is_unmapped and not r2.is_unmapped:
+                    
+                    ##reorient reads to ease duplicates removal
+                    or1, or2 = get_ordered_reads(r1, r2)
+                    or1_chrom = samfile.get_reference_name(or1.reference_id)
+                    or2_chrom = samfile.get_reference_name(or2.reference_id)
+
+                    ##reset as tag now that the reads are oriented
+                    r1as = get_read_tag(or1, gtag)
+                    r2as = get_read_tag(or2, gtag)
+                    if gtag is not None:
+                        htag = str(r1as)+"-"+str(r2as)
+                        
+                    cur_handler.write(
+                        or1.query_name + "\t" +
+                        or1_chrom + "\t" +
+                        str(get_read_pos(or1)+1) + "\t" +
+                        str(get_read_strand(or1)) + "\t" +
+                        or2_chrom + "\t" +
+                        str(get_read_pos(or2)+1) + "\t" +
+                        str(get_read_strand(or2)) + "\t" +
+                        "NA" + "\t" + ##dist 
+                        "NA" + "\t" + ##resfrag1
+                        "NA" + "\t" + ##resfrag2
+                        str(or1.mapping_quality) + "\t" + 
+                        str(or2.mapping_quality) + "\t" + 
+                        str(htag) + "\n")
+                
+                elif r2.is_unmapped and not r1.is_unmapped:
+                    cur_handler.write(
+                        r1.query_name + "\t" +
+                        r1_chrom + "\t" +
+                        str(get_read_pos(r1)+1) + "\t" +
+                        str(get_read_strand(r1)) + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" + 
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        str(r1.mapping_quality) + "\t" + 
+                        "*" + "\n")
+                elif r1.is_unmapped and not r2.is_unmapped:
+                    cur_handler.write(
+                        r2.query_name + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        r2_chrom + "\t" +
+                        str(get_read_pos(r2)+1) + "\t" +
+                        str(get_read_strand(r2)) + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" + 
+                        str(r2.mapping_quality) + "\n")
+
+            if (reads_counter % 100000 == 0 and verbose):
+                print("##", reads_counter)
+
+    # Close handler
+    handle_valid.close()
+    if allOutput:
+        handle_dump.close()
+        handle_single.close()
+        handle_filt.close()
+
+    # Write stats file
+    with open(outputDir + '/' + baseReadsFile + '.RSstat', 'w') as handle_stat:
+        handle_stat.write("## Hi-C processing - no restriction fragments\n")
+        handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+        handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+        handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+        handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
+        handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
+        handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
+
+    ## Write AS report
+        if gtag is not None:
+            handle_stat.write("## ======================================\n")
+            handle_stat.write("## Allele specific information\n")
+            handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
+            handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
+
+
+
diff --git a/bin/mapped_2hic_fragments.py b/bin/mapped_2hic_fragments.py
new file mode 100755
index 0000000000000000000000000000000000000000..e823ee02cce862b704c2b6939d1642db579665be
--- /dev/null
+++ b/bin/mapped_2hic_fragments.py
@@ -0,0 +1,828 @@
+#!/usr/bin/env python
+
+# HiC-Pro
+# Copyleft 2015 Institut Curie
+# Author(s): Nicolas Servant, Eric Viara
+# Contact: nicolas.servant@curie.fr
+# This software is distributed without any guarantee under the terms of the
+# GNU General
+# Public License, either Version 2, June 1991 or Version 3, June 2007.
+
+"""
+Script to keep only valid 3C products - DE and SC are removed
+Output is : readname / 
+"""
+import time
+import getopt
+import sys
+import os
+import re
+import pysam
+from bx.intervals.intersection import Intersecter, Interval
+
+
+def usage():
+    """Usage function"""
+    print("Usage : python mapped_2hic_fragments.py")
+    print("-f/--fragmentFile <Restriction fragment file GFF3>")
+    print("-r/--mappedReadsFile <BAM/SAM file of mapped reads>")
+    print("[-o/--outputDir] <Output directory. Default is current directory>")
+    print("[-s/--shortestInsertSize] <Shortest insert size of mapped reads to consider>")
+    print("[-l/--longestInsertSize] <Longest insert size of mapped reads to consider>")
+    print("[-t/--shortestFragmentLength] <Shortest restriction fragment length to consider>")
+    print("[-m/--longestFragmentLength] <Longest restriction fragment length to consider>")
+    print("[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>")
+    print("[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>")
+    print("[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>")
+    print("[-S/--sam] <Output an additional SAM file with flag 'CT' for pairs classification>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
+    return
+
+
+def get_args():
+    """Get argument"""
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "f:r:o:s:l:t:m:d:g:Svah",
+            ["fragmentFile=",
+             "mappedReadsFile=",
+             "outputDir=", 
+             "minInsertSize=", "maxInsertSize", 
+             "minFragSize", "maxFragSize", 
+             "minDist",
+             "gatg", "sam", "verbose", "all", "help"])
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    return opts
+
+
+def timing(function, *args):
+    """
+    Run a function and return the run time and the result of the function
+    If the function requires arguments, those can be passed in
+    """
+    startTime = time.time()
+    result = function(*args)
+    print('{} function took {:.3f}ms'.format(function.__name__, (time.time() - startTime) * 1000))
+    return result
+
+
+def get_read_strand(read):
+    """
+    Conversion of read position to naive strand representation
+
+    Parameters
+    ----------
+    read : list
+        list of aligned reads
+    """
+    strand = "+"
+    if read.is_reverse:
+        strand = "-"
+    return strand
+
+
+def isIntraChrom(read1, read2):
+    """
+    Return true if the read pair is intrachromosomal
+    
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    if read1.tid == read2.tid:
+        return True
+    return False
+
+
+def get_cis_dist(read1, read2):
+     """
+     Calculate the contact distance between two intrachromosomal reads
+
+     read1 : [AlignedRead]
+     read2 : [AlignedRead]
+
+     """
+     # Get oriented reads
+     ##r1, r2 = get_ordered_reads(read1, read2)
+     dist = None
+     if not read1.is_unmapped and not read2.is_unmapped:         
+         ## Contact distances can be calculated for intrachromosomal reads only
+         if isIntraChrom(read1, read2):
+             r1pos, r2pos = get_read_pos(read1), get_read_pos(read2)
+             dist = abs(r1pos - r2pos)
+     return dist
+
+
+def get_read_pos(read, st="start"):
+    """
+    Return the read position (zero-based) used for the intersection with
+    the restriction fragment
+
+    The 5' end is not a good choice for the reverse reads (which contain part
+    of the restriction site, and thus overlap the next restriction fragment)
+    Using the left-most position (ie. start, 5' for forward, 3' for reverse) or the
+    middle of the read should work but the middle of the reads might be more
+    safe
+
+    Parameters
+    -----------
+    read : list
+        list of aligned reads
+    """
+
+    if st == "middle":
+        pos = read.reference_start + int(read.alen/2)
+    elif st =="start":
+        pos = get_read_start(read)
+    elif st == "left":
+        pos = read.reference_start
+    
+    return pos
+
+
+def get_read_start(read):
+    """
+    Return the 5' end of the read
+    """
+    if read.is_reverse:
+        pos = read.reference_start + read.alen -1
+    else:
+        pos = read.reference_start
+    return pos
+
+def get_ordered_reads(read1, read2):
+    """
+    Reorient reads
+
+    The sequencing is usually not oriented. Reorient the reads so that r1 is
+    always before r2.
+    Sequencing is always performed from 5' to 3' end
+    So in unstranded case, we can have
+
+    1              2
+    --->           --->
+    ==========  or =========
+         <----          <---
+             2             1
+
+    Reordering the reads allow to always be in the first case
+    read1 = [AlignedRead]
+    read2 = [AlignedRead]
+    """
+    if read1.tid == read2.tid:
+        if get_read_pos(read1) < get_read_pos(read2):
+            r1, r2 = read1, read2
+        else:
+            r1, r2 = read2, read1
+    else:
+        if read1.tid < read2.tid:
+            r1, r2 = read1, read2
+        else:
+            r1, r2 = read2, read1
+                
+    return r1, r2
+
+def load_restriction_fragment(in_file, minfragsize=None, maxfragsize=None, verbose=False):
+    """
+    Read a BED file and store the intervals in a tree
+
+    Intervals are zero-based objects. The output object is a hash table with
+    one search tree per chromosome
+
+    in_file = input file [character]
+    verbose = verbose mode [logical]
+
+    """
+    resFrag = {}
+    if verbose:
+        print("## Loading Restriction File Intervals {} ...".format(in_file))
+    bed_handle = open(in_file)
+    nline = 0
+    nfilt = 0
+    for line in bed_handle:
+         nline += 1
+         bedtab = line.split("\t")
+         try:
+              chromosome, start, end, name = bedtab[:4]
+         except ValueError:
+              print("Warning : wrong input format in line {}. Not a BED file ?!".format(nline))
+              continue
+
+        # BED files are zero-based as Intervals objects
+         start = int(start)  # + 1
+         end = int(end)
+         fragl = abs(end - start)
+         name = name.strip()
+
+         ## Discard fragments outside the size range
+         filt = False
+         if minfragsize != None and int(fragl) < int(minfragsize):
+             nfilt += 1
+             filt = True
+         elif maxfragsize != None and int(fragl) > int(maxfragsize):
+             nfilt += 1
+             filt = True
+       
+         if chromosome in resFrag:
+             tree = resFrag[chromosome]
+             tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
+         else:
+             tree = Intersecter()
+             tree.add_interval(Interval(start, end, value={'name': name, 'filter': filt}))
+             resFrag[chromosome] = tree
+    
+    if nfilt > 0:
+        print("Warning : {} fragment(s) outside of range and discarded. {} remaining.".format(nfilt, nline - nfilt))
+    bed_handle.close()
+    return resFrag
+
+
+def get_overlapping_restriction_fragment(resFrag, chrom, read):
+    """
+    Intersect a given read with the set of restriction fragments
+
+    ##
+    resFrag = the restriction fragments [hash]
+    chrom = the chromosome to look at [character]
+    read = the read to intersect [AlignedRead]
+
+    """
+    # Get read position (middle or start)
+    pos = get_read_pos(read, st="middle")
+    
+    if chrom in resFrag:
+        # Overlap with the position of the read (zero-based)
+        resfrag = resFrag[chrom].find(pos, pos+1)
+        if len(resfrag) > 1:
+            print("Warning : {} restictions fragments found for {} -skipped".format(len(resfrag), read.query_name))
+            return None
+        elif len(resfrag) == 0:
+            print("Warning - no restriction fragments for {} at {} : {}".format(read.query_name, chrom, pos))
+            return None
+        else:
+            return resfrag[0]
+    else:
+        print("Warning - no restriction fragments for {} at {} : {}".format(read.qname, chrom, pos))
+        return None
+
+
+def are_contiguous_fragments(frag1, frag2, chr1, chr2):
+    '''
+    Compare fragment positions to check if they are contiguous
+    '''
+    ret = False
+    if chr1 == chr2:
+        if int(frag1.start) < int(frag2.start):
+            d = int(frag2.start) - int(frag1.end)
+        else:
+            d = int(frag1.start) - int(frag2.end)
+            
+        if d == 0:
+            ret = True
+    
+    return ret
+
+def is_religation(read1, read2, frag1, frag2):
+    """
+    Reads are expected to map adjacent fragments
+    Check the orientation of reads -><-
+
+    """
+    ret = False
+    if are_contiguous_fragments(frag1, frag2, read1.tid, read2.tid):
+        #r1, r2 = get_ordered_reads(read1, read2)
+        #if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        ret = True
+    return ret
+
+
+def is_self_circle(read1, read2):
+    """
+    Both reads are expected to be on the same restriction fragments
+    Check the orientation of reads <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    """
+    ret = False
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+    # 1<- ->2 or 2<- ->1
+    if get_read_strand(r1) == "-" and get_read_strand(r2) == "+":
+        ret = True
+    return ret
+
+
+def is_dangling_end(read1, read2):
+    """
+    Both reads are expected to be on the same restriction fragments
+    Check the orientation of reads -><-
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    """
+    ret = False
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+    # 1-> <-2 or 2-> <-1
+    if get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        ret = True
+    return ret
+
+
+def get_valid_orientation(read1, read2):
+    """
+    Both reads are expected to be on the different restriction fragments
+    Check the orientation of reads ->-> / <-<- / -><- / <-->
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+
+    """
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+
+    direction = None
+    if get_read_strand(r1) == "+" and get_read_strand(r2) == "+":
+        direction = "FF"
+    elif get_read_strand(r1) == "-" and get_read_strand(r2) == "-":
+        direction = "RR"
+    elif get_read_strand(r1) == "+" and get_read_strand(r2) == "-":
+        direction = "FR"
+    elif get_read_strand(r1) == "-" and get_read_strand(r2) == "+":
+        direction = "RF"
+
+    return direction
+
+
+def get_PE_fragment_size(read1, read2, resFrag1, resFrag2, interactionType):
+    """
+    Calculate the size of the DNA fragment library
+
+    read1 : [AlignedRead]
+    read2 : [AlignedRead]
+    resfrag1 = restriction fragment overlapping the R1 read [interval]
+    resfrag2 = restriction fragment overlapping the R2 read [interval]
+    interactionType : Type of interaction from get_interaction_type() [str]
+
+    """
+
+    fragmentsize = None
+
+    # Get oriented reads
+    r1, r2 = get_ordered_reads(read1, read2)
+    if not r1.is_unmapped and not r2.is_unmapped:
+        if r1 == read2:
+            rfrag1 = resFrag2
+            rfrag2 = resFrag1
+        else:
+            rfrag1 = resFrag1
+            rfrag2 = resFrag2
+
+        ## In this case use the read start !
+        r1pos = get_read_start(r1)
+        r2pos = get_read_start(r2)
+
+        if interactionType == "DE" or interactionType == "RE":
+            fragmentsize = r2pos - r1pos
+        elif interactionType == "SC":
+            fragmentsize = (r1pos - rfrag1.start) + (rfrag2.end - r2pos)
+        elif interactionType == "VI":
+            if get_read_strand(r1) == "+":
+                dr1 = rfrag1.end - r1pos
+            else:
+                dr1 = r1pos - rfrag1.start
+            if get_read_strand(r2) == "+":
+                dr2 = rfrag2.end - r2pos
+            else:
+                dr2 = r2pos - rfrag2.start
+            fragmentsize = dr2 + dr1
+
+    return fragmentsize
+
+
+def get_interaction_type(read1, read1_chrom, resfrag1, read2,
+                         read2_chrom, resfrag2, verbose):
+    """
+    Returns the interaction type
+
+    For a given reads pair and their related restriction fragment, classify
+    the 3C products as :
+
+    - Interaction
+    - Self circle
+    - Dangling end
+    - Religation
+    - Unknown
+
+    ##
+    read1 = the R1 read of the pair [AlignedRead]
+    read1_chrom = the chromosome of R1 read [character]
+    resfrag1 = restrictin fragment overlapping the R1 read [interval]
+    read2 = the R2 read of the pair [AlignedRead]
+    read2_chrom = the chromosome of R2 read [character]
+    resfrag2 = restrictin fragment overlapping the R2 read [interval]
+    verbose = verbose mode [logical]
+
+    """
+
+    # If returned InteractionType=None -> Same restriction fragment
+    # and same strand = Dump
+    interactionType = None
+      
+    if not read1.is_unmapped and not read2.is_unmapped and resfrag1 is not None and resfrag2 is not None:
+        # same restriction fragment
+        if resfrag1 == resfrag2:
+            # Self_circle <- ->
+            if is_self_circle(read1, read2):
+                interactionType = "SC"
+            # Dangling_end -> <-
+            elif is_dangling_end(read1, read2):
+                interactionType = "DE"
+        elif is_religation(read1, read2, resfrag1, resfrag2):
+            interactionType = "RE"
+        else:
+            interactionType = "VI"
+    elif read1.is_unmapped or read2.is_unmapped:
+        interactionType = "SI"
+
+    return interactionType
+
+
+def get_read_tag(read, tag):
+    for t in read.get_tags():
+        if t[0] == tag:
+            return t[1]
+    return None
+
+
+if __name__ == "__main__":
+    # Read command line arguments
+    opts = get_args()
+    samOut = False
+    verbose = False
+    allOutput = False
+    minInsertSize = None
+    maxInsertSize = None
+    minFragSize = None
+    maxFragSize = None
+    minDist = None
+    outputDir = "."
+    gtag = None
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-f", "--fragmentFile"):
+            fragmentFile = arg
+        elif opt in ("-r", "--mappedReadsFile"):
+            mappedReadsFile = arg
+        elif opt in ("-o", "--outputDir"):
+            outputDir = arg
+        elif opt in ("-s", "--shortestInsertSize"):
+            minInsertSize = arg
+        elif opt in ("-l", "--longestInsertSize"):
+            maxInsertSize = arg
+        elif opt in ("-t", "--shortestFragmentLength"):
+            minFragSize = arg
+        elif opt in ("-m", "--longestFragmentLength"):
+            maxFragSize = arg
+        elif opt in ("-d", "--minCisDist"):
+            minDist = arg
+        elif opt in ("-g", "--gtag"):
+            gtag = arg
+        elif opt in ("-a", "--all"):
+            allOutput = True
+        elif opt in ("-S", "--sam"):
+            samOut = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
+
+    # Verbose mode
+    if verbose:
+        print("## overlapMapped2HiCFragments.py")
+        print("## mappedReadsFile=", mappedReadsFile)
+        print("## fragmentFile=", fragmentFile)
+        print("## minInsertSize=", minInsertSize)
+        print("## maxInsertSize=", maxInsertSize)
+        print("## minFragSize=", minFragSize)
+        print("## maxFragSize=", maxFragSize)
+        print("## allOuput=", allOutput)
+        print("## SAM ouput=", samOut)
+        print("## verbose={}\n".format(verbose))
+
+    # Initialize variables
+    reads_counter = 0
+    de_counter = 0
+    re_counter = 0
+    sc_counter = 0
+    valid_counter = 0
+    valid_counter_FF = 0
+    valid_counter_RR = 0
+    valid_counter_FR = 0
+    valid_counter_RF = 0
+    single_counter = 0
+    dump_counter = 0
+    filt_counter = 0
+
+    ## AS counter
+    G1G1_ascounter = 0
+    G2G2_ascounter = 0
+    G1U_ascounter = 0
+    UG1_ascounter = 0
+    G2U_ascounter = 0
+    UG2_ascounter = 0
+    G1G2_ascounter = 0
+    G2G1_ascounter = 0
+    UU_ascounter = 0
+    CF_ascounter = 0
+
+    baseReadsFile = os.path.basename(mappedReadsFile)
+    baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
+
+    # Open handlers for output files
+    handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
+
+    if allOutput:
+        handle_de = open(outputDir + '/' + baseReadsFile + '.DEPairs', 'w')
+        handle_re = open(outputDir + '/' + baseReadsFile + '.REPairs', 'w')
+        handle_sc = open(outputDir + '/' + baseReadsFile + '.SCPairs', 'w')
+        handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
+        handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs', 'w')
+        handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs', 'w')
+
+    # Read the BED file
+    resFrag = timing(load_restriction_fragment, fragmentFile, minFragSize, maxFragSize, verbose)
+     
+    # Read the SAM/BAM file
+    if verbose:
+        print("## Opening SAM/BAM file {} ...".format(mappedReadsFile))
+    samfile = pysam.Samfile(mappedReadsFile, "rb")
+
+    if samOut:
+        handle_sam = pysam.AlignmentFile(outputDir + '/' + baseReadsFile + '_interaction.bam', "wb", template=samfile)
+
+    # Reads are 0-based too (for both SAM and BAM format)
+    # Loop on all reads
+    if verbose:
+        print("## Classifying Interactions ...")
+
+    for read in samfile.fetch(until_eof=True):
+        reads_counter += 1
+        cur_handler = None
+        htag = ""
+
+        # First mate
+        if read.is_read1:
+            r1 = read
+            if not r1.is_unmapped:
+                r1_chrom = samfile.get_reference_name(r1.tid)
+                r1_resfrag = get_overlapping_restriction_fragment(resFrag, r1_chrom, r1)
+            else:
+                r1_resfrag = None
+                r1_chrom = None
+
+        # Second mate
+        elif read.is_read2:
+            r2 = read
+            if not r2.is_unmapped:
+                r2_chrom = samfile.get_reference_name(r2.tid)
+                r2_resfrag = get_overlapping_restriction_fragment(resFrag, r2_chrom, r2)
+            else:
+                r2_resfrag = None
+                r2_chrom = None
+
+            if r1_resfrag is not None or r2_resfrag is not None:
+
+                interactionType = get_interaction_type(r1, r1_chrom, r1_resfrag, r2, r2_chrom, r2_resfrag, verbose)
+                dist = get_PE_fragment_size(r1, r2, r1_resfrag, r2_resfrag, interactionType)
+                cdist = get_cis_dist(r1, r2)
+                
+                ## Filter based on restriction fragments
+                if (r1_resfrag is not None and r1_resfrag.value['filter'] == True) or (r2_resfrag is not None and r2_resfrag.value['filter']) == True:
+                    interactionType = "FILT"
+   
+                # Check Insert size criteria - FILT
+                if (minInsertSize is not None and dist is not None and
+                    dist < int(minInsertSize)) or \
+                    (maxInsertSize is not None and dist is not None and dist > int(maxInsertSize)):
+                    interactionType = "FILT"
+
+                # Check Distance criteria - FILT
+                # Done for VI otherwise this criteria will overwrite all other invalid classification
+                if (interactionType == "VI" and minDist is not None and cdist is not None and cdist < int(minDist)):
+                    interactionType = "FILT"
+        
+                if interactionType == "VI":
+                    valid_counter += 1
+                    cur_handler = handle_valid
+                    validType = get_valid_orientation(r1, r2)
+                    if validType == "RR":
+                        valid_counter_RR += 1
+                    elif validType == "FF":
+                        valid_counter_FF += 1
+                    elif validType == "FR":
+                        valid_counter_FR += 1
+                    elif validType == "RF":
+                        valid_counter_RF += 1
+
+                    ## Counts valid pairs based on XA tag
+                    if gtag is not None:
+                        r1as = get_read_tag(r1, gtag)
+                        r2as = get_read_tag(r2, gtag)
+                        if r1as == 1 and r2as == 1:
+                            G1G1_ascounter += 1
+                        elif r1as == 2 and r2as == 2:
+                            G2G2_ascounter += 1
+                        elif r1as == 1 and r2as == 0:
+                            G1U_ascounter += 1
+                        elif r1as == 0 and r2as == 1:
+                            UG1_ascounter += 1
+                        elif r1as == 2 and r2as == 0:
+                            G2U_ascounter += 1
+                        elif r1as == 0 and r2as == 2:
+                            UG2_ascounter += 1
+                        elif r1as == 1 and r2as == 2:
+                            G1G2_ascounter += 1
+                        elif r1as == 2 and r2as == 1:
+                            G2G1_ascounter += 1
+                        elif r1as == 3 or r2as == 3:
+                            CF_ascounter += 1
+                        else:
+                            UU_ascounter += 1
+
+                elif interactionType == "DE":
+                    de_counter += 1
+                    cur_handler = handle_de if allOutput else None
+
+                elif interactionType == "RE":
+                    re_counter += 1
+                    cur_handler = handle_re if allOutput else None
+
+                elif interactionType == "SC":
+                    sc_counter += 1
+                    cur_handler = handle_sc if allOutput else None
+
+                elif interactionType == "SI":
+                    single_counter += 1
+                    cur_handler = handle_single if allOutput else None
+                
+                elif interactionType == "FILT":
+                    filt_counter += 1
+                    cur_handler = handle_filt if allOutput else None
+                
+                else:
+                    interactionType = "DUMP"
+                    dump_counter += 1
+                    cur_handler = handle_dump if allOutput else None
+            else:
+                interactionType = "DUMP"
+                dump_counter += 1
+                cur_handler = handle_dump if allOutput else None
+                dist = None
+
+            ## Write results in right handler
+            if cur_handler is not None:
+                if not r1.is_unmapped and not r2.is_unmapped:                 
+                    ##reorient reads to ease duplicates removal
+                    or1, or2 = get_ordered_reads(r1, r2)
+                    or1_chrom = samfile.get_reference_name(or1.tid)
+                    or2_chrom = samfile.get_reference_name(or2.tid)
+                    
+                    ##reset as tag now that the reads are oriented
+                    r1as = get_read_tag(or1, gtag)
+                    r2as = get_read_tag(or2, gtag)
+                    if gtag is not None:
+                        htag = str(r1as)+"-"+str(r2as)
+
+                    ##get fragment name and reorient if necessary
+                    if or1 == r1 and or2 == r2:
+                        or1_resfrag = r1_resfrag
+                        or2_resfrag = r2_resfrag
+                    elif or1 == r2 and or2 == r1:
+                        or1_resfrag = r2_resfrag
+                        or2_resfrag = r1_resfrag
+
+                    if or1_resfrag is not None:
+                        or1_fragname = or1_resfrag.value['name']
+                    else:
+                        or1_fragname = 'None'
+                        
+                    if or2_resfrag is not None:
+                        or2_fragname = or2_resfrag.value['name']
+                    else:
+                        or2_fragname = 'None'
+                        
+                    cur_handler.write(
+                        or1.query_name + "\t" +
+                        or1_chrom + "\t" +
+                        str(get_read_pos(or1)+1) + "\t" +
+                        str(get_read_strand(or1)) + "\t" +
+                        or2_chrom + "\t" +
+                        str(get_read_pos(or2)+1) + "\t" +
+                        str(get_read_strand(or2)) + "\t" +
+                        str(dist) + "\t" + 
+                        or1_fragname + "\t" +
+                        or2_fragname + "\t" +
+                        str(or1.mapping_quality) + "\t" + 
+                        str(or2.mapping_quality) + "\t" + 
+                        str(htag) + "\n")
+
+                elif r2.is_unmapped and not r1.is_unmapped:
+                    if r1_resfrag is not None:
+                        r1_fragname = r1_resfrag.value['name']
+                          
+                    cur_handler.write(
+                        r1.query_name + "\t" +
+                        r1_chrom + "\t" +
+                        str(get_read_pos(r1)+1) + "\t" +
+                        str(get_read_strand(r1)) + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" + 
+                        r1_fragname + "\t" +
+                        "*" + "\t" +
+                        str(r1.mapping_quality) + "\t" + 
+                        "*" + "\n")
+                elif r1.is_unmapped and not r2.is_unmapped:
+                    if r2_resfrag is not None:
+                        r2_fragname = r2_resfrag.value['name']
+                    
+                    cur_handler.write(
+                        r2.query_name + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        r2_chrom + "\t" +
+                        str(get_read_pos(r2)+1) + "\t" +
+                        str(get_read_strand(r2)) + "\t" +
+                        "*" + "\t" +
+                        "*" + "\t" +
+                        r2_fragname + "\t" +
+                        "*" + "\t" +
+                        str(r2.mapping_quality) + "\n")
+
+                ## Keep initial order    
+                if samOut:
+                    r1.tags = r1.tags + [('CT', str(interactionType))]
+                    r2.tags = r2.tags + [('CT', str(interactionType))]
+                    handle_sam.write(r1)
+                    handle_sam.write(r2)
+
+            if (reads_counter % 100000 == 0 and verbose):
+                print("##", reads_counter)
+
+    # Close handler
+    handle_valid.close()
+    if allOutput:
+        handle_de.close()
+        handle_re.close()
+        handle_sc.close()
+        handle_dump.close()
+        handle_single.close()
+        handle_filt.close()
+
+
+    # Write stats file
+    handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
+    handle_stat.write("## Hi-C processing\n")
+    handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
+    handle_stat.write("Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
+    handle_stat.write("Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
+    handle_stat.write("Dangling_end_pairs\t" + str(de_counter) + "\n")
+    handle_stat.write("Religation_pairs\t" + str(re_counter) + "\n")
+    handle_stat.write("Self_Cycle_pairs\t" + str(sc_counter) + "\n")
+    handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
+    handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
+    handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
+
+    ## Write AS report
+    if gtag is not None:
+        handle_stat.write("## ======================================\n")
+        handle_stat.write("## Allele specific information\n")
+        handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
+        handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
+
+    handle_stat.close()
+
+    if samOut:
+        samfile.close()
diff --git a/bin/mergeSAM.py b/bin/mergeSAM.py
new file mode 100755
index 0000000000000000000000000000000000000000..12917b16277a0a768269f611cd13422bccbe98a1
--- /dev/null
+++ b/bin/mergeSAM.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+
+## HiC-Pro
+## Copyright (c) 2015 Institut Curie                               
+## Author(s): Nicolas Servant, Eric Viara
+## Contact: nicolas.servant@curie.fr
+## This software is distributed without any guarantee under the terms of the BSD-3 licence.
+## See the LICENCE file for details
+
+
+"""
+Script to pair 2 SAM/BAM files into one PE BAM
+- On 03/05/16 Ferhat made changes starting from ~/bin/HiC-Pro_2.7.2b/scripts/mergeSAM.py 
+to make singletons possible to be reported
+"""
+
+import getopt
+import sys
+import os
+import re
+import pysam
+
+def usage():
+    """Usage function"""
+    print("Usage : python mergeSAM.py")
+    print("-f/--forward <forward read mapped file>")
+    print("-r/--reverse <reverse read mapped file>")
+    print("[-o/--output] <Output file. Default is stdin>")
+    print("[-s/--single] <report singleton>")
+    print("[-m/--multi] <report multiple hits>")
+    print("[-q/--qual] <minimum reads mapping quality>")
+    print("[-t/--stat] <generate a stat file>")
+    print("[-v/--verbose] <Verbose>")
+    print("[-h/--help] <Help>")
+    return
+
+
+def get_args():
+    """Parse command-line options with getopt; print usage and exit(-1) on error."""
+    try:
+        opts, args = getopt.getopt(
+            sys.argv[1:],
+            "f:r:o:q:smtvh",
+            ["forward=",
+             "reverse=",
+             "output=", "qual=",
+             "single", "multi", "stat", "verbose", "help"])
+    except getopt.GetoptError:
+        usage()
+        sys.exit(-1)
+    # Positional arguments (args) are ignored; only the option list is returned.
+    return opts
+
+
+def is_unique_bowtie2(read):
+	ret = False
+	if not read.is_unmapped and read.has_tag('AS'):
+		if read.has_tag('XS'):
+			primary =  read.get_tag('AS')
+			secondary = read.get_tag('XS')
+			if (primary > secondary):
+				ret = True
+		else:
+			ret = True
+	return ret
+
+## Remove everything after "/" or " " in read's name
+def get_read_name(read):
+    name = read.query_name
+    #return name.split("/",1)[0]
+    return re.split('/| ', name)[0]
+
+def sam_flag(read1, read2, hr1, hr2):
+	
+	f1 = read1.flag
+	f2 = read2.flag
+
+	if r1.is_unmapped == False:
+		r1_chrom = hr1.get_reference_name(r1.reference_id)
+	else:
+		r1_chrom = "*"
+	if r2.is_unmapped == False:
+		r2_chrom = hr2.get_reference_name(r2.reference_id)
+	else:
+		r2_chrom="*"
+
+
+  ##Relevant bitwise flags (flag in an 11-bit binary number)
+  ##1 The read is one of a pair
+  ##2 The alignment is one end of a proper paired-end alignment
+  ##4 The read has no reported alignments
+  ##8 The read is one of a pair and has no reported alignments
+  ##16 The alignment is to the reverse reference strand
+  ##32 The other mate in the paired-end alignment is aligned to the reverse reference strand
+  ##64 The read is the first (#1) mate in a pair
+  ##128 The read is the second (#2) mate in a pair
+  
+  ##The reads were mapped as single-end data, so should expect flags of 
+  ##0 (map to the '+' strand) or 16 (map to the '-' strand)
+  ##Output example: a paired-end read that aligns to the reverse strand 
+  ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
+  
+	if f1 & 0x4:
+		f1 = f1 | 0x8
+
+	if f2 & 0x4:
+		f2 = f2 | 0x8
+    
+	if (not (f1 & 0x4) and not (f2 & 0x4)):
+    ##The flag should now indicate this is paired-end data
+		f1 = f1 | 0x1
+		f1 = f1 | 0x2
+		f2 = f2 | 0x1
+		f2 = f2 | 0x2
+  
+    
+  ##Indicate if the pair is on the reverse strand
+	if f1 & 0x10:
+		f2 = f2 | 0x20
+  
+	if f2 & 0x10:
+		f1 = f1 | 0x20
+  
+  ##Is this first or the second pair?
+	f1 = f1 | 0x40
+	f2 = f2 | 0x80
+  
+    ##Insert the modified bitwise flags into the reads
+	read1.flag = f1
+	read2.flag = f2
+	
+	##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
+	#RNEXT
+	if r1_chrom == r2_chrom:
+		read1.next_reference_id = r1.reference_id
+		read2.next_reference_id = r1.reference_id
+	else:
+		read1.next_reference_id = r2.reference_id
+		read2.next_reference_id = r1.reference_id
+   	#PNEXT
+	read1.next_reference_start = read2.reference_start
+	read2.next_reference_start = read1.reference_start
+
+	return(read1, read2)
+
+
+
+if __name__ == "__main__":
+    ## Read command line arguments
+	opts = get_args()
+	inputFile = None
+	outputFile = None
+	mapq = None
+	report_single = False
+	report_multi = False
+	verbose = False
+	stat = False
+	output = "-"
+
+	if len(opts) == 0:
+		usage()
+		sys.exit()
+
+	for opt, arg in opts:
+		if opt in ("-h", "--help"):
+			usage()
+			sys.exit()
+		elif opt in ("-f", "--forward"):
+			R1file = arg
+		elif opt in ("-r", "--reverse"):
+			R2file = arg
+		elif opt in ("-o", "--output"):
+			output = arg
+		elif opt in ("-q", "--qual"):
+			mapq = arg
+		elif opt in ("-s", "--single"):
+			report_single = True
+		elif opt in ("-m", "--multi"):
+			report_multi = True
+		elif opt in ("-t", "--stat"):
+			stat = True
+		elif opt in ("-v", "--verbose"):
+			verbose = True
+		else:
+			assert False, "unhandled option"
+
+    ## Verbose mode
+	if verbose:
+		print("## mergeBAM.py")
+		print("## forward=", R1file)
+		print("## reverse=", R2file)
+		print("## output=", output)
+		print("## min mapq=", mapq)
+		print("## report_single=", report_single)
+		print("## report_multi=", report_multi)
+		print("## verbose=", verbose)
+
+    ## Initialize variables
+	tot_pairs_counter = 0
+	multi_pairs_counter = 0
+	uniq_pairs_counter = 0
+	unmapped_pairs_counter = 0 
+	lowq_pairs_counter = 0
+	multi_singles_counter = 0
+	uniq_singles_counter = 0
+	lowq_singles_counter = 0
+
+    #local_counter = 0
+	paired_reads_counter = 0
+	singleton_counter = 0
+	reads_counter = 0
+	r1 = None
+	r2 = None
+
+    ## Reads are 0-based too (for both SAM and BAM format)
+    ## Loop on all reads
+	if verbose:
+		print("## Merging forward and reverse tags ...")
+	with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2: 
+		if output == "-":
+			outfile = pysam.AlignmentFile(output, "w", template=hr1)
+		else:
+			outfile = pysam.AlignmentFile(output, "wb", template=hr1)
+		for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
+			reads_counter +=1
+
+            #print r1
+            #print r2
+            #print hr1.getrname(r1.tid)
+            #print hr2.getrname(r2.tid)
+
+			if (reads_counter % 1000000 == 0 and verbose):
+				print("##", reads_counter)
+                
+			if get_read_name(r1) == get_read_name(r2):
+                    
+                 ## both unmapped
+				if r1.is_unmapped == True and r2.is_unmapped == True:
+					unmapped_pairs_counter += 1
+					continue
+
+                ## both mapped
+				elif r1.is_unmapped == False and r2.is_unmapped == False:
+                     ## quality
+					if mapq != None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
+						lowq_pairs_counter += 1
+						continue
+                 
+                     ## Unique mapping
+					if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
+						uniq_pairs_counter += 1
+					else:
+						multi_pairs_counter += 1
+						if report_multi == False:
+							continue
+		# one end mapped, other is not
+				else:
+					singleton_counter += 1
+					if report_single == False:
+						continue
+					if r1.is_unmapped == False:  ## first end is mapped, second is not
+                         ## quality
+						if mapq != None and (r1.mapping_quality < int(mapq)): 
+							lowq_singles_counter += 1
+							continue
+                         ## Unique mapping
+						if is_unique_bowtie2(r1) == True:
+							uniq_singles_counter += 1
+						else:
+							multi_singles_counter += 1
+							if report_multi == False:
+								continue
+					else:  ## second end is mapped, first is not
+                         ## quality
+						if mapq != None and (r2.mapping_quality < int(mapq)): 
+							lowq_singles_counter += 1
+							continue
+                         ## Unique mapping
+						if is_unique_bowtie2(r2) == True:
+							uniq_singles_counter += 1
+						else:
+							multi_singles_counter += 1
+							if report_multi == False:
+								continue
+
+				tot_pairs_counter += 1          
+				(r1, r2) = sam_flag(r1,r2, hr1, hr2)
+
+                #print hr1.getrname(r1.tid)
+                #print hr2.getrname(r2.tid)
+                #print r1
+                #print r2
+                ## Write output
+				outfile.write(r1)
+				outfile.write(r2)
+
+			else:
+				print("Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted.")
+				sys.exit(1)
+
+	if stat:
+		if output == '-':
+			statfile = "pairing.stat"
+		else:
+			statfile = re.sub('\.bam$', '.pairstat', output)
+		with open(statfile, 'w') as handle_stat:
+			handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")  
+			handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
+			handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+	hr1.close()
+	hr2.close()
+	outfile.close()
+
diff --git a/bin/merge_statfiles.py b/bin/merge_statfiles.py
new file mode 100755
index 0000000000000000000000000000000000000000..dc11bf75d31973df86a0eaae0aa1c4b37e004e27
--- /dev/null
+++ b/bin/merge_statfiles.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+## nf-core-hic
+## Copyright (c) 2020 Institut Curie                               
+## Author(s): Nicolas Servant
+## Contact: nicolas.servant@curie.fr
+## This software is distributed without any guarantee under the terms of the BSD-3 licence.
+## See the LICENCE file for details
+
+"""
+Script to merge any files with the same template
+"""
+
+import argparse
+import sys
+import glob
+import os
+from collections import OrderedDict
+
+def num(s):
+    try:
+        return int(s)
+    except ValueError:
+        return float(s)
+
+
+if __name__ == "__main__":
+    ## Read command line arguments
+    # NOTE(review): --files is not required=True; when omitted, args.files is
+    # None and len(None) below raises TypeError — confirm the intended CLI.
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-f", "--files", help="List of input file(s)", type=str, nargs='+')
+    parser.add_argument("-v", "--verbose", help="verbose mode", action='store_true')
+    args = parser.parse_args()
+
+    infiles = args.files
+    li = len(infiles)
+
+    if li > 0:
+        if args.verbose:
+            print("## merge_statfiles.py")
+            print("## Merging "+ str(li)+" files")
+
+        ## Reading first file to get the template
+        # The template maps each stat label (first column) to the list of its
+        # numeric values (remaining columns); '#' lines are skipped.
+        template = OrderedDict()
+        if args.verbose:
+            print("## Use "+infiles[0]+" as template")
+        with open(infiles[0]) as f:
+            for line in f:
+                if not line.startswith("#"):
+                    lsp = line.strip().split("\t")
+                    data = map(num, lsp[1:len(lsp)])
+                    template[str(lsp[0])] = list(data)
+
+        if len(template) == 0:
+            print("Cannot find template files !")
+            sys.exit(1)
+
+        ## Int are counts / Float are percentage
+        # Merge the remaining files into the template: integer columns are
+        # summed (counts); float columns are averaged pairwise (percentages).
+        for fidx in list(range(1, li)):
+            with open(infiles[fidx]) as f:
+                for line in f:
+                    if not line.startswith("#"):
+                        lsp = line.strip().split("\t")
+                        if lsp[0] in template:
+                            for i in list(range(1, len(lsp))):
+                                if isinstance(num(lsp[i]), int):
+                                    template[lsp[0]][i-1] += num(lsp[i])
+                                else:
+                                    template[lsp[0]][i-1] = round((template[lsp[0]][i-1] + num(lsp[i]))/2,3)
+                        else:
+                            sys.stderr.write("Warning : '"+lsp[0]+"' not found in template ["+infiles[fidx]+"]\n")
+
+        ## Print template
+        # Merged stats are written tab-separated to stdout, one label per line.
+        for x in template:
+            sys.stdout.write(x)
+            for y in template[x]:
+                sys.stdout.write("\t"+str(y))
+            sys.stdout.write("\n")
+
+    else:
+        print("No files to merge - stop")
+        sys.exit(1)
+
+
diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py
index feb158ae01211f5e71e63c8c74a3573882f9a6a4..9f5650db54daa74c59c3967500a399ff55534f29 100755
--- a/bin/scrape_software_versions.py
+++ b/bin/scrape_software_versions.py
@@ -3,18 +3,22 @@ from __future__ import print_function
 from collections import OrderedDict
 import re
 
-# TODO nf-core: Add additional regexes for new tools in process get_software_versions
+# Add additional regexes for new tools in process get_software_versions
 regexes = {
-    "nf-core/hic": ["v_pipeline.txt", r"(\S+)"],
-    "Nextflow": ["v_nextflow.txt", r"(\S+)"],
-    "FastQC": ["v_fastqc.txt", r"FastQC v(\S+)"],
-    "MultiQC": ["v_multiqc.txt", r"multiqc, version (\S+)"],
+    'nf-core/hic': ['v_pipeline.txt', r"(\S+)"],
+    'Nextflow': ['v_nextflow.txt', r"(\S+)"],
+    'Bowtie2': ['v_bowtie2.txt', r"Bowtie2 v(\S+)"],
+    'Python': ['v_python.txt', r"Python v(\S+)"],
+    'Samtools': ['v_samtools.txt', r"Samtools v(\S+)"],
+    'MultiQC': ['v_multiqc.txt', r"multiqc, version (\S+)"],
 }
 results = OrderedDict()
-results["nf-core/hic"] = '<span style="color:#999999;">N/A</span>'
-results["Nextflow"] = '<span style="color:#999999;">N/A</span>'
-results["FastQC"] = '<span style="color:#999999;">N/A</span>'
-results["MultiQC"] = '<span style="color:#999999;">N/A</span>'
+results['nf-core/hic'] = '<span style="color:#999999;\">N/A</span>'
+results['Nextflow'] = '<span style="color:#999999;\">N/A</span>'
+results['Bowtie2'] = '<span style="color:#999999;\">N/A</span>'
+results['Python'] = '<span style="color:#999999;\">N/A</span>'
+results['Samtools'] = '<span style="color:#999999;\">N/A</span>'
+results['MultiQC'] = '<span style="color:#999999;\">N/A</span>'
 
 # Search each file using its regex
 for k, v in regexes.items():
@@ -32,6 +36,11 @@ for k in list(results):
     if not results[k]:
         del results[k]
 
+# Remove software set to false in results
+for k in results:
+    if not results[k]:
+        del(results[k])
+
 # Dump to YAML
 print(
     """
@@ -52,3 +61,4 @@ print("    </dl>")
 with open("software_versions.csv", "w") as f:
     for k, v in results.items():
         f.write("{}\t{}\n".format(k, v))
+
diff --git a/bin/src/build_matrix.cpp b/bin/src/build_matrix.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e366d5b7649d3f9eb040a80eee5a5d10572f6593
--- /dev/null
+++ b/bin/src/build_matrix.cpp
@@ -0,0 +1,1037 @@
+// HiC-Pro
+// Copyright 2015 Institut Curie                               
+// Author(s): Eric Viara
+// Contact: nicolas.servant@curie.fr
+// This software is distributed without any guarantee under the terms of the BSD-3 License
+
+#include <iostream>
+#include <iomanip>
+#include <fstream>
+#include <sstream>
+#include <unordered_map>
+#include <map>
+#include <vector>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+
+
+static const int SPARSE_FMT = 0x1;
+static const int BED_FMT = 0x2;
+static const char* prog;
+static bool progress = false;
+static bool detail_progress = false;
+static bool quiet = false;
+
+static bool NO_DICHO = getenv("NO_DICHO") != NULL;
+
+typedef unsigned int chrsize_t;
+
+const std::string VERSION = "1.2 [2015-10-20]";
+
+const static chrsize_t BIN_NOT_FOUND = (chrsize_t)-1;
+
+class AxisChromosome;
+
+static bool is_empty_line(const char* buffer)
+{
+  while (char c = *buffer++) {
+    if (c != ' ' && c != '\n' && c != '\t') {
+      return false;
+    }
+  }
+  return true;
+}
+
+static int bed_line_parse(char* buffer, char chr[], chrsize_t& start, chrsize_t& end, const std::string& bedfile, size_t line_num)
+{
+  if (sscanf(buffer, "%2047s %u %u", chr, &start, &end) != 3) {
+    std::cerr << "bed file \"" << bedfile << "\" at line #" << line_num << " format error\n";
+    return 1;
+  }
+  return 0;
+}
+
+struct Interval {
+  chrsize_t start;
+  chrsize_t end;
+
+  Interval(chrsize_t start = 0, chrsize_t end = 0) : start(start), end(end) { }
+};
+ 
+class ChrRegions {
+
+  std::vector<std::string> chr_v;
+  std::map<std::string, std::vector<Interval>* > intervals;
+
+public:
+  ChrRegions() { }
+
+  int readBedfile(const std::string& bedfile) {
+    std::ifstream ifs(bedfile.c_str());
+    if (ifs.bad() || ifs.fail()) {
+      std::cerr << prog << " cannot open bed file: " << bedfile << " for reading\n";
+      return 1;
+    }
+    char buffer[4096];
+    size_t line_num = 0;
+    chrsize_t lastend = 0;
+    char lastchr[2048] = {0};
+    while (!ifs.eof()) {
+      ifs.getline(buffer, sizeof(buffer)-1);
+      line_num++;
+      if (is_empty_line(buffer)) {
+	continue;
+      }
+      chrsize_t start = 0;
+      chrsize_t end = 0;
+      char chr[2048];
+      if (bed_line_parse(buffer, chr, start, end, bedfile, line_num)) {
+	return 1;
+      }
+      if (intervals.find(chr) == intervals.end()) {
+	intervals[chr] = new std::vector<Interval>();
+	chr_v.push_back(chr);
+      }
+      /*
+      if (lastend != 0 && !strcmp(lastchr, chr) && start != lastend) {
+	std::cerr << "warning: discontinuous segment for chromosome " << chr << " at position " << start << " " << end << std::endl;
+      }
+      */
+      if (*lastchr && strcmp(lastchr, chr)) {
+	lastend = 0;
+      }
+
+      if (lastend != 0 && start < lastend) {
+	std::cerr << "error: bedfile not sorted at line #" << line_num << std::endl;
+	exit(1);
+      }
+      strcpy(lastchr, chr);
+      lastend = end;
+      intervals[chr]->push_back(Interval(start, end));
+      if (progress && (line_num % 100000) == 0) {
+	std::cerr << '.' << std::flush;
+      }
+    }
+    if (progress) {
+      std::cerr << std::endl;
+    }
+    return 0;
+  }
+
+  void displayBed(std::ostream& ofs, const std::vector<AxisChromosome*>& axis_chr) const {
+    std::vector<std::string>::const_iterator begin = chr_v.begin();
+    std::vector<std::string>::const_iterator end = chr_v.end();
+    unsigned int num = 1;
+    while (begin != end) {
+      const std::string& chrname = *begin;
+      std::map<std::string, std::vector<Interval>* >::const_iterator iter = intervals.find(chrname);
+      assert(iter != intervals.end());
+      const std::vector<Interval>* itv_vect = (*iter).second;
+      std::vector<Interval>::const_iterator itv_begin = itv_vect->begin();
+      std::vector<Interval>::const_iterator itv_end = itv_vect->end();
+      while (itv_begin != itv_end) {
+	const Interval& itv = (*itv_begin);
+	ofs << chrname << '\t' << itv.start << '\t' << itv.end << '\t' << num << '\n';
+	if (progress && (num % 100000) == 0) {
+	  std::cerr << '.' << std::flush;
+	}
+	num++;
+	++itv_begin;
+      }
+      ++begin;
+    }
+    if (progress) {
+      std::cerr << std::endl;
+    }
+  }
+
+  const std::vector<Interval>* getIntervalsFromChr(const std::string& chr) const {
+    std::map<std::string, std::vector<Interval>* >::const_iterator iter = intervals.find(chr);
+    if (iter != intervals.end()) {
+      return (*iter).second;
+    }
+    return NULL;
+  }
+};
+
+class Dichotomic {
+
+  int min, max;
+  const std::vector<Interval>& intervals;
+
+public:
+  Dichotomic(const std::vector<Interval>& intervals) : intervals(intervals) {
+    //min = middle(intervals[0]);
+    //max = middle(intervals[intervals.size()-1]);
+    min = 0;
+    max = intervals.size()-1;
+  }
+
+  static chrsize_t middle(const Interval& itv) {
+    return (itv.start+1 + itv.end) / 2;
+  }
+
+  int find(chrsize_t value) {
+    int l = min;
+    int r = max;
+    int n = 0;
+    while (l <= r) {
+      n = (l + r) >> 1;
+      const Interval& itv = intervals[n];
+      if (value >= itv.start+1 && value <= itv.end) {
+	return n;
+      }
+
+      int x = middle(itv) - value;
+      
+      if (x < 0) {
+	l = n + 1;
+      } else {
+	r = n - 1;
+      }
+      //std::cout << "l: " << l << '\n';
+      //std::cout << "r: " << r << '\n';
+    }
+
+    return -1;
+  }
+};
+
+class Chromosome {
+
+private:
+  static std::unordered_map<std::string, Chromosome*> chr_map;
+
+  void computeSizes(chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions);
+
+  std::string name;
+
+  chrsize_t chrsize;
+
+  chrsize_t binsize;
+  chrsize_t stepsize;
+  chrsize_t bincount;
+
+  const ChrRegions* chr_regions;
+
+public:
+  Chromosome(const std::string& name, chrsize_t chrsize, chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions) : name(name), chrsize(chrsize), chr_regions(chr_regions) {
+    computeSizes(ori_binsize, step, binadjust, chr_regions);
+    assert(chr_map.find(name) == chr_map.end());
+    chr_map[name] = this;
+  }
+
+  void adjustBinsize(chrsize_t ori_binsize, const chrsize_t step);
+
+  const std::string& getName() const {return name;}
+  chrsize_t getChrsize() const {return chrsize;}
+  chrsize_t getBinsize() const {return binsize;}
+  chrsize_t getStepsize() const {return stepsize;}
+  chrsize_t getBincount() const {return bincount;}
+
+  const ChrRegions* getChrRegions() const {return chr_regions;}
+
+  static chrsize_t getCount() {
+    return chr_map.size();
+  }
+
+  static Chromosome* getByName(const std::string& name) {
+    return chr_map[name];
+  }
+};
+
+class AxisChromosome {
+  int idx; // really needed ?
+  const Chromosome* chr;
+  chrsize_t binstart;
+  chrsize_t binend;
+
+public:
+  AxisChromosome(int binoffset, const Chromosome* chr, const AxisChromosome* lastAxisChr) : chr(chr) {
+    if (lastAxisChr != NULL) {
+      binstart = lastAxisChr->getBinend();
+    } else {
+      binstart = binoffset;
+    }
+    binend = binstart + chr->getBincount();
+    /*
+    if (verbose) {
+      std::cerr << "AxisChromosome: " << chr->getName() << " " << binstart << " " << binend << " " << chr->getBincount() << std::endl;
+    }
+    */
+  }
+
+  chrsize_t getBinstart() const {return binstart;}
+  chrsize_t getBinend() const {return binend;}
+  chrsize_t getChrsize() const {return chr->getChrsize();}
+  chrsize_t getBinsize() const {return chr->getBinsize();}
+  chrsize_t getStepsize() const {return chr->getStepsize();}
+  chrsize_t getBincount() const {return chr->getBincount();}
+
+  const Chromosome* getChromosome() const {return chr;}
+
+  chrsize_t assign_bin(const std::string& org, chrsize_t start) const {
+    const ChrRegions* chr_regions = chr->getChrRegions();
+    if (chr_regions != NULL) {
+      const std::vector<Interval>* intervals = chr_regions->getIntervalsFromChr(chr->getName());
+      assert(intervals != NULL);
+
+      if (!NO_DICHO) {
+	Dichotomic dicho(*intervals);
+	int where = dicho.find(start);
+	if (where < 0) {
+	  if (!quiet) {
+	    std::cerr << "warning: no bin at position " << chr->getName() << ":" << start << std::endl;
+	  }
+	  return BIN_NOT_FOUND;
+	}
+	return where + getBinstart();
+      }
+
+      std::vector<Interval>::const_iterator begin = intervals->begin();
+      std::vector<Interval>::const_iterator end = intervals->end();
+
+      chrsize_t binidx = 1;
+      while (begin != end) {
+	const Interval& itv = *begin;
+	if (start >= itv.start+1 && start <= itv.end) {
+	  break;
+	}
+	++binidx;
+	++begin;
+      }
+      
+      return binidx + getBinstart() - 1;
+    }
+
+    int loc = (int)start;
+    int binsize = getBinsize();
+    int stepsize = getStepsize();
+    int cur_binidx = 1 + ceil((double)(loc-binsize)/stepsize);
+    int cur_binbeg = stepsize * (cur_binidx-1)+1;
+    int cur_binend = cur_binbeg + binsize-1;
+    int chrsize = getChrsize();
+    if (cur_binend > chrsize) {
+      cur_binend = chrsize;
+    } 
+    return cur_binidx + getBinstart() - 1;
+  }
+};
+
+class Matrix {
+
+  std::vector<AxisChromosome*> axis_chr_abs;
+  std::vector<AxisChromosome*> axis_chr_ord;
+  std::unordered_map<std::string, AxisChromosome*> axis_chr_abs_map;
+  std::unordered_map<std::string, AxisChromosome*> axis_chr_ord_map;
+
+  std::map<chrsize_t, std::map<chrsize_t, chrsize_t> > mat;
+
+  void addAxisChromosome(const std::vector<const Chromosome*>& chr_v, std::vector<AxisChromosome*>& axis_chr, std::unordered_map<std::string, AxisChromosome*>& axis_chr_map);
+
+  const AxisChromosome* getAxisChromosome(const std::string& chrname, const std::unordered_map<std::string, AxisChromosome*>& axis_chr_map) const {
+    std::unordered_map<std::string, AxisChromosome*>::const_iterator iter = axis_chr_map.find(chrname);
+    if (iter == axis_chr_map.end()) {
+      return NULL;
+    }
+    return (*iter).second;
+  }
+
+  void displayBed(std::ostream& ofs, const std::vector<AxisChromosome*>& axis_chr) const {
+    std::vector<AxisChromosome*>::const_iterator begin = axis_chr.begin();
+    std::vector<AxisChromosome*>::const_iterator end = axis_chr.end();
+    while (begin != end) {
+      const AxisChromosome* axis_chr = *begin;
+      const std::string& name = axis_chr->getChromosome()->getName();
+      chrsize_t binstart = axis_chr->getBinstart();
+      chrsize_t binend = axis_chr->getBinend();
+      chrsize_t binsize = axis_chr->getBinsize();
+      chrsize_t chrsize = axis_chr->getChrsize();
+      binend -= binstart;
+      for (chrsize_t bin = 0; bin < binend; ++bin) {
+	// bed are 0-based begin, 1-based end
+	chrsize_t beg = bin * binsize;
+	chrsize_t end = beg + binsize - 1;
+	if (end > chrsize) {
+	  end = chrsize-1;
+	}
+	ofs << name << '\t' << beg << '\t' << (end+1) << '\t' << (bin+binstart) << '\n';
+      }
+      ++begin;
+    }
+  }
+
+  int binoffset;
+
+public:
+  Matrix(int binoffset) : binoffset(binoffset) {}
+
+  void addXAxisChromosome(const std::vector<const Chromosome*>& chr_v);
+  void addYAxisChromosome(const std::vector<const Chromosome*>& chr_v);
+
+  const AxisChromosome* getXAxisChromosome(const std::string& chrname) const {
+    return getAxisChromosome(chrname, axis_chr_abs_map);
+  }
+
+  const AxisChromosome* getYAxisChromosome(const std::string& chrname) const {
+    return getAxisChromosome(chrname, axis_chr_ord_map);
+  }
+
+  void add(chrsize_t abs_bin, chrsize_t ord_bin) {
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::iterator iter = mat.find(abs_bin);
+    if (iter == mat.end()) {
+      mat[abs_bin] = std::map<chrsize_t, chrsize_t>();
+      mat[abs_bin][ord_bin] = 1;
+    } else {
+      (*iter).second[ord_bin]++;
+    }
+  }
+
+  void displayMatrix(std::ostream& ofs) const {
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::const_iterator begin = mat.begin();
+    std::map<chrsize_t, std::map<chrsize_t, chrsize_t> >::const_iterator end = mat.end();
+    size_t line_total = 0;
+    if (progress) {
+      while (begin != end) {
+	const std::map<chrsize_t, chrsize_t>& line = (*begin).second;
+	line_total += line.size();
+	++begin;
+      }
+      begin = mat.begin();
+    }
+
+    size_t line_cnt = 1;
+    if (progress) {
+      std::cerr << "\n=================\n";
+      std::cerr << " Dumping matrix\n";
+      std::cerr << "=================\n\n";
+    }
+    size_t modulo = (line_total >= 1000) ? (line_total / 1000) : 1;
+    while (begin != end) {
+      chrsize_t abs = (*begin).first;
+      const std::map<chrsize_t, chrsize_t>& line = (*begin).second;
+      std::map<chrsize_t, chrsize_t>::const_iterator bb = line.begin();
+      std::map<chrsize_t, chrsize_t>::const_iterator ee = line.end();
+      while (bb != ee) {
+	if (progress && (line_cnt % modulo) == 0) {
+	  double percent = (double(line_cnt)/line_total)*100;
+	  std::cerr << "" << percent << "% " << line_cnt << " / " << line_total << std::endl;
+	}
+	ofs << abs << '\t' << (*bb).first << '\t' << (*bb).second << '\n';
+	line_cnt++;
+	++bb;
+      }
+      ++begin;
+    }
+  }
+
+  void displayXBed(std::ostream& ofs) const {
+    displayBed(ofs, axis_chr_abs);
+  }
+
+  void displayYBed(std::ostream& ofs) const {
+    displayBed(ofs, axis_chr_ord);
+  }
+
+  const std::vector<AxisChromosome*>& getXAxisChromosomes() {return axis_chr_abs;}
+  const std::vector<AxisChromosome*>& getYAxisChromosomes() {return axis_chr_ord;}
+};
+
+void Matrix::addAxisChromosome(const std::vector<const Chromosome*>& chr_v, std::vector<AxisChromosome*>& axis_chr, std::unordered_map<std::string, AxisChromosome*>& axis_chr_map)
+{
+  std::vector<const Chromosome*>::const_iterator begin = chr_v.begin();
+  std::vector<const Chromosome*>::const_iterator end = chr_v.end();
+
+  const AxisChromosome* lastAxisChr = NULL;
+  while (begin != end) {
+    const Chromosome* chr = *begin;
+    AxisChromosome* axisChr = new AxisChromosome(binoffset, chr, lastAxisChr);
+    axis_chr.push_back(axisChr);
+    axis_chr_map[chr->getName()] = axisChr;
+    lastAxisChr = axisChr;
+    ++begin;
+  }
+}
+
+void Matrix::addXAxisChromosome(const std::vector<const Chromosome*>& chr_v)
+{
+  addAxisChromosome(chr_v, axis_chr_abs, axis_chr_abs_map);
+}
+
+void Matrix::addYAxisChromosome(const std::vector<const Chromosome*>& chr_v)
+{
+  addAxisChromosome(chr_v, axis_chr_ord, axis_chr_ord_map);
+}
+
+std::unordered_map<std::string, Chromosome*> Chromosome::chr_map;
+
+enum Format {
+  SPARSE_IND_FMT = SPARSE_FMT,
+  SPARSE_BED_FMT = SPARSE_FMT|BED_FMT,
+  EXPANDED_FMT = 0x4
+};
+
+void Chromosome::adjustBinsize(chrsize_t ori_binsize, const chrsize_t step)
+{
+  bincount = 1 + (chrsize_t)floor( (double)(chrsize-ori_binsize) / (ori_binsize/step));
+  binsize = chrsize / bincount;
+  stepsize = binsize / step;
+}
+
+void Chromosome::computeSizes(chrsize_t ori_binsize, chrsize_t step, bool binadjust, const ChrRegions* chr_regions)
+{
+  if (NULL != chr_regions) {
+    const std::vector<Interval>* intervals = chr_regions->getIntervalsFromChr(name);
+    assert(intervals != NULL);
+    bincount = intervals->size();
+    /*
+    if (verbose) {
+      std::cerr << name << " bincount: " << bincount << std::endl;
+    }
+    */
+  } else {
+    if (chrsize < ori_binsize) {
+      binsize = chrsize;
+      stepsize = chrsize;
+      bincount = 1;
+    } else if (binadjust) {
+      adjustBinsize(ori_binsize, step);
+    } else {
+      binsize = ori_binsize;
+      stepsize = (chrsize_t)floor(ori_binsize/step);
+      chrsize_t remainder = (chrsize - ori_binsize) % stepsize;
+      chrsize_t tmp_bincount = 1 + (chrsize_t)floor(chrsize-ori_binsize)/stepsize;
+      bincount = remainder > 0 ? tmp_bincount+1 : tmp_bincount;
+    }
+    /*
+    if (verbose) {
+      std::cerr << name << " sizes: " << chrsize << " " << binsize << " " << stepsize << " " << bincount << std::endl;
+    }
+    */
+  }
+}
+
+static int usage(int ret = 1)
+{
+  std::cerr << "\nusage: " << prog << " --binsize BINSIZE|--binfile --chrsizes FILE --ifile FILE\n";
+  std::cerr << "       --oprefix PREFIX [--binadjust] [--step STEP] [--binoffset OFFSET]\n";
+  std::cerr << "       [--matrix-format asis|upper|lower|complete][--chrA CHR... --chrB CHR...] [--quiet] [--progress] [--detail-progress]\n";
+  std::cerr << "\nusage: " << prog << " --version\n";
+  std::cerr << "\nusage: " << prog << " --help\n";
+  return ret;
+}
+
+static int help()
+{
+  (void)usage();
+  std::cerr << "\nOPTIONS\n\n";
+  std::cerr << "  --version              : display version\n";
+  std::cerr << "  --binsize BINSIZE      : bin size\n";
+  std::cerr << "  --binfile BEDFILE      : bed file containing bins (chr start end)\n";
+  std::cerr << "  --chrsizes FILE        : file containing chromosome sizes\n";
+  std::cerr << "  --ifile FILE           : input interaction file\n";
+  std::cerr << "  --oprefix PREFIX       : output prefix of generated files (matrix and bed)\n";
+  std::cerr << "  --binadjust            : [optional] adjust bin sizes, default is false\n";
+  std::cerr << "  --step STEP            : [optional] step size, default is 1\n";
+  std::cerr << "  --binoffset OFFSET     : [optional] starting bin offset, default is 1\n";
+  std::cerr << "  --matrix-format FORMAT : [optional] FORMAT may be:\n";
+  std::cerr << "                           - asis: matrix is generated according to input data (default)\n";
+  std::cerr << "                           - upper: only the upper matrix is generated\n";
+  std::cerr << "                           - lower: only the lower matrix is generated\n";
+  std::cerr << "                           - complete: generate both parts of the matrix (upper and lower);\n";
+  std::cerr << "                             input data must contain only one part (upper or lower) \n";
+  std::cerr << "  --chrA CHR             : [optional] colon separated list of abscissa chromosomes; default is all chromosomes\n";
+  std::cerr << "  --chrB CHR             : [optional] colon separated list of ordinate chromosomes; default is all chromosomes\n";
+  std::cerr << "  --quiet                : do not display any warning\n";
+  std::cerr << "  --progress             : display progress\n";
+  std::cerr << "  --detail-progress      : display detail progress (needs preliminary steps consuming time)\n";
+  return -1;
+}
+
+enum MatrixFormat {
+  ASIS_MATRIX = 1,
+  UPPER_MATRIX,
+  LOWER_MATRIX,
+  COMPLETE_MATRIX
+};
+  
+static int get_options(int argc, char* argv[], chrsize_t& binsize, const char*& binfile, const char*& chrsize_file, const char*& ifile, const char*& oprefix, Format& format, std::string& bed_prefix, bool& binadjust, MatrixFormat& matrix_format, chrsize_t& step, bool& whole_genome, int& binoffset, const char*& chrA, const char*& chrB)
+{
+  prog = argv[0];
+  for (int ac = 1; ac < argc; ++ac) {
+    const char* opt = argv[ac];
+    if (*opt == '-') {
+      if (!strcmp(opt, "--binadjust")) {
+	binadjust = true;
+      } else if (!strcmp(opt, "--version")) {
+	std::cout << "build_matrix version " << VERSION << "\n";
+	exit(0);
+      } else if (!strcmp(opt, "--progress")) {
+	progress = true;
+      } else if (!strcmp(opt, "--quiet")) {
+	quiet = true;
+      } else if (!strcmp(opt, "--detail-progress")) {
+	progress = true;
+	detail_progress = true;
+      } else if (!strcmp(opt, "--matrix-format")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	std::string matrix_format_str = argv[++ac];
+	if (matrix_format_str == "asis") {
+	  matrix_format = ASIS_MATRIX;
+	} else if (matrix_format_str == "upper") {
+	  matrix_format = UPPER_MATRIX;
+	} else if (matrix_format_str == "lower") {
+	  matrix_format = LOWER_MATRIX;
+	} else if (matrix_format_str == "complete") {
+	  matrix_format = COMPLETE_MATRIX;
+	} else {
+	  return usage();
+	}
+      } else if (!strcmp(opt, "--step")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	step = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--binfile")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binfile = argv[++ac];
+      } else if (!strcmp(opt, "--binsize")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binsize = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--binoffset")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	binoffset = atoi(argv[++ac]);
+      } else if (!strcmp(opt, "--ifile")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	ifile = argv[++ac];
+      } else if (!strcmp(opt, "--oprefix")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	oprefix = argv[++ac];
+      } else if (!strcmp(opt, "--chrsizes")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrsize_file = argv[++ac];
+      } else if (!strcmp(opt, "--chrA")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrA = argv[++ac];
+	whole_genome = false;
+      } else if (!strcmp(opt, "--chrB")) {
+	if (ac == argc-1) {
+	  return usage();
+	}
+	chrB = argv[++ac];
+	whole_genome = false;
+      } else if (!strcmp(opt, "--help")) {
+	return help();
+      } else {
+	std::cerr << '\n' << prog << ": unknown option " << opt << std::endl;
+	return usage();
+      }
+    }
+  }
+
+  return 0;
+}
+
+static void split_in_vect(const std::string& str, std::vector<const Chromosome*>& vect)
+{
+  size_t last_pos = 0;
+  while (size_t pos = str.find(':', last_pos)) {
+    std::string chrname;
+    bool last = pos == std::string::npos;
+    if (last) {
+      chrname = str.substr(last_pos);
+    } else {
+      chrname = str.substr(last_pos, pos-last_pos);
+    }
+    const Chromosome* chr = Chromosome::getByName(chrname);
+    if (!chr) {
+      std::cerr << prog << ": unknown chromosome " << chrname << std::endl;
+      exit(1);
+    }
+    vect.push_back(chr);
+    if (last) {
+      break;
+    }
+    last_pos = pos+1;
+  }
+}
+
+static int interaction_parse(char* buffer, char*& lchr, chrsize_t& lstart, char*& rchr, chrsize_t& rstart)
+{
+  char c;
+  char* str;
+  while ((c = *buffer++) != 0) {
+    if (c == '\t') {
+      lchr = buffer;
+      break;
+    }
+  }
+  while ((c = *buffer) != 0) {
+    if (c == '\t') {
+      *buffer++ = 0;
+      str = buffer;
+      break;
+    }
+    buffer++;
+  }
+
+  while ((c = *buffer) != 0) {
+    if (c == '\t') {
+      *buffer++ = 0;
+      lstart = atoi(str);
+      break;
+    }
+    buffer++;
+  }
+
+  while ((c = *buffer++) != 0) {
+    if (c == '\t') {
+      rchr = buffer;
+      break;
+    }
+  }
+
+  while ((c = *buffer) != 0) {
+    if (c == '\t') {
+      *buffer++ = 0;
+      str = buffer;
+      break;
+    }
+    buffer++;
+  }
+
+  while ((c = *buffer) != 0) {
+    if (c == '\t') {
+      *buffer++ = 0;
+      rstart = atoi(str);
+      break;
+    }
+    buffer++;
+  }
+
+  return 0;
+}
+
+static char p_buffer[512000];
+
+static int build_matrix_init(Matrix& matrix, const char* ifile, std::ifstream& ifs, const std::string& oprefix, std::ofstream& matfs, std::ofstream& xbedfs, std::ofstream& ybedfs, const char* chrsize_file, bool whole_genome, const char* chrA, const char* chrB, chrsize_t ori_binsize, const char* binfile, chrsize_t step, bool binadjust, ChrRegions*& chr_regions, size_t& line_total)
+{
+  ifs.open(ifile);
+  if (ifs.bad() || ifs.fail()) {
+    std::cerr << prog << " cannot open interaction file: " << ifile << " for reading\n";
+    return 1;
+  }
+
+  if (detail_progress) {
+    if (progress) {
+      std::cerr << "\n======================================\n";
+      std::cerr << " Getting information for progress bar\n";
+      std::cerr << "======================================\n\n";
+    }
+    std::cerr << std::setprecision(2) << std::fixed;
+    int fd = open(ifile, O_RDONLY);
+    struct stat st;
+    assert(fd >= 0);
+    assert(fstat(fd, &st) == 0);
+    int nn;
+    int cnt = 1;
+    while ((nn = read(fd, p_buffer, sizeof(p_buffer))) > 0) {
+      const char *p = p_buffer;
+      while (nn-- > 0) {
+	if (*p++ == '\n') {
+	  line_total++;
+	}
+      }
+      if ((cnt % 200) == 0) {
+	std::cerr << '.' << std::flush;
+      }
+      cnt++;
+    }
+    std::cerr << std::endl;
+    close(fd);
+  }
+  
+  std::ifstream chrsizefs;
+  chrsizefs.open(chrsize_file);
+  if (chrsizefs.bad() || chrsizefs.fail()) {
+    std::cerr << prog << " cannot open chrsizes file: " << chrsize_file << " for reading\n";
+    return 1;
+  }
+
+  std::string matfile = oprefix + ".matrix";
+  matfs.open(matfile);
+  if (matfs.bad() || matfs.fail()) {
+    std::cerr << prog << " cannot open file: " << matfile << " for writing\n";
+    return 1;
+  }
+
+  std::string xbedfile = oprefix + "_abs.bed";
+  xbedfs.open(xbedfile);
+  if (xbedfs.bad() || xbedfs.fail()) {
+    std::cerr << prog << " cannot open file: " << xbedfile << " for writing\n";
+    return 1;
+  }
+
+  std::string ybedfile = oprefix + "_ord.bed";
+  if (!whole_genome) {
+    //std::string xbedlink;
+    //size_t pos = xbedfile.rfind('/');
+    //if (pos != std::string::npos) {
+    //  xbedlink = xbedfile.substr(pos+1);
+    //} else {
+    //  xbedlink = xbedfile;
+    //}
+    //unlink(ybedfile.c_str());
+    //if (symlink(xbedlink.c_str(), ybedfile.c_str())) {
+    //  std::cerr << prog << " cannot created link: " << ybedfile << "\n";
+    //  return 1;
+    //}
+    //} else {
+    ybedfs.open(ybedfile);
+    if (ybedfs.bad() || ybedfs.fail()) {
+      std::cerr << prog << " cannot open file: " << ybedfile << " for writing\n";
+      return 1;
+    }
+  }
+
+  chr_regions = NULL;
+  if (NULL != binfile) {
+    chr_regions = new ChrRegions();
+    if (progress) {
+      std::cerr << "\n=================\n";
+      std::cerr << " Reading binfile\n";
+      std::cerr << "=================\n\n";
+    }
+    if (chr_regions->readBedfile(binfile)) {
+      return 1;
+    }
+  }
+
+  std::vector<const Chromosome*> all_chr_v;
+  while (!chrsizefs.eof()) {
+    std::string buffer;
+    getline(chrsizefs, buffer);
+
+    chrsize_t chrsize;
+    std::istringstream istr(buffer);
+    std::string name;
+    istr >> name >> chrsize;
+    if (!istr.fail()) {
+      Chromosome* chromosome = new Chromosome(name, chrsize, ori_binsize, step, binadjust, chr_regions);
+      all_chr_v.push_back(chromosome);
+    }
+  }
+
+  chrsizefs.close();
+
+  if (chrA) {
+    assert(chrB != NULL);
+    std::vector<const Chromosome*> chrA_v;
+    std::vector<const Chromosome*> chrB_v;
+    split_in_vect(chrA, chrA_v);
+    split_in_vect(chrB, chrB_v);
+    matrix.addXAxisChromosome(chrA_v);
+    matrix.addYAxisChromosome(chrB_v);
+  } else {
+    matrix.addXAxisChromosome(all_chr_v);
+    matrix.addYAxisChromosome(all_chr_v);
+  }
+
+  return 0;
+}
+
+static int build_matrix(int binoffset, chrsize_t ori_binsize, const char* binfile, const char* chrsize_file, const char* ifile, const char* oprefix, Format _dummy_format, const std::string& _dummy_bed_prefix, bool binadjust, MatrixFormat matrix_format, chrsize_t step, bool whole_genome, const char* chrA, const char* chrB)
+{
+  std::ifstream ifs;
+  std::ofstream matfs, xbedfs, ybedfs;
+
+  Matrix matrix(binoffset);
+  ChrRegions *chr_regions = NULL;
+  size_t line_total = 0;
+  if (int ret = build_matrix_init(matrix, ifile, ifs, oprefix, matfs, xbedfs, ybedfs, chrsize_file, whole_genome, chrA, chrB, ori_binsize, binfile, step, binadjust, chr_regions, line_total)) {
+    return ret;
+  }
+
+  if (progress) {
+    std::cerr << "\n=================\n";
+    std::cerr << " Building matrix\n";
+    std::cerr << "=================\n\n";
+  }
+  size_t line_cnt = 1;
+  size_t line_num = 0;
+  char buffer[4096];
+  std::string lmark, rmark, lorg, rorg;
+  while (!ifs.eof()) {
+    ifs.getline(buffer, sizeof(buffer)-1);
+    line_num++;
+    if (is_empty_line(buffer)) {
+      continue;
+    }
+    chrsize_t lstart = 0;
+    chrsize_t rstart = 0;
+    char* lchr = NULL;
+    char* rchr = NULL;
+    interaction_parse(buffer, lchr, lstart, rchr, rstart);
+    const AxisChromosome* abs_chr = matrix.getXAxisChromosome(lchr);
+    if (!abs_chr) {
+      continue;
+    }
+    const AxisChromosome* ord_chr = matrix.getYAxisChromosome(rchr);
+    if (!ord_chr) {
+      continue;
+    }
+    chrsize_t abs_bin = abs_chr->assign_bin(lorg, lstart);
+    if (abs_bin == BIN_NOT_FOUND) {
+      continue;
+    }
+    chrsize_t ord_bin = ord_chr->assign_bin(rorg, rstart);
+    if (ord_bin == BIN_NOT_FOUND) {
+      continue;
+    }
+    switch(matrix_format) {
+
+    case ASIS_MATRIX:
+      matrix.add(abs_bin, ord_bin);
+      break;
+
+    case UPPER_MATRIX:
+      if (abs_bin < ord_bin) {
+	matrix.add(abs_bin, ord_bin);
+      } else {
+	matrix.add(ord_bin, abs_bin);
+      }
+      break;
+
+    case LOWER_MATRIX:
+      if (abs_bin > ord_bin) {
+	matrix.add(abs_bin, ord_bin);
+      } else {
+	matrix.add(ord_bin, abs_bin);
+      }
+      break;
+
+    case COMPLETE_MATRIX:
+      matrix.add(abs_bin, ord_bin);
+      if (abs_bin != ord_bin) {
+	matrix.add(ord_bin, abs_bin);
+      }
+      break;
+    }
+    line_cnt++;
+    if (progress && (line_cnt % 100000) == 0) {
+      if (detail_progress) {
+	double percent = (double(line_cnt)/line_total)*100;
+	std::cerr << "" << percent << "% " << line_cnt << " / " << line_total << std::endl;
+      } else {
+	std::cerr << line_cnt << std::endl;
+      }
+    }
+  }
+
+  if (progress) {
+    std::cerr << "\n==================\n";
+    std::cerr << " Dumping bedfiles\n";
+    std::cerr << "==================\n\n";
+  }
+
+  if (NULL != chr_regions) {
+    chr_regions->displayBed(xbedfs, matrix.getXAxisChromosomes());
+    if (!whole_genome) {
+      chr_regions->displayBed(ybedfs, matrix.getYAxisChromosomes());
+    }
+  } else {
+    matrix.displayXBed(xbedfs);
+    if (!whole_genome) {
+      matrix.displayYBed(ybedfs);
+    }
+  }
+  matrix.displayMatrix(matfs);
+  xbedfs.close();
+  ybedfs.close();
+  matfs.close();
+  return 0;
+}
+
+int main(int argc, char* argv[])
+{
+  chrsize_t step = 1;
+  bool binadjust = false;
+  MatrixFormat matrix_format = ASIS_MATRIX;
+  chrsize_t binsize = 0;
+  const char* ifile = NULL;
+  const char* oprefix = NULL;
+  const char* chrA = NULL;
+  const char* chrB = NULL;
+  const char* chrsize_file = NULL;
+  const char* binfile = NULL;
+  bool whole_genome = true;
+  int binoffset = 1;
+  std::string bed_prefix;
+  Format format = SPARSE_BED_FMT;
+
+  if (int ret = get_options(argc, argv, binsize, binfile, chrsize_file, ifile, oprefix, format, bed_prefix, binadjust, matrix_format, step, whole_genome, binoffset, chrA, chrB)) {
+    if (ret < 0) {
+      return 0;
+    }
+    return ret;
+  }
+
+  if (!binsize && !binfile) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --binsize or --binfile option\n";
+    return usage();
+  }
+
+  if (!chrsize_file) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --chrsizes option\n";
+    return usage();
+  }
+
+  if (!ifile) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --ifile option\n";
+    return usage();
+  }
+
+  if (!oprefix) {
+    std::cerr << '\n';
+    std::cerr << prog << ": missing --oprefix option\n";
+    return usage();
+  }
+
+  if ((chrA && !chrB) || (!chrA && chrB)) {
+    std::cerr << '\n';
+    std::cerr << prog << ": options --chrA and --chrB must be set simultanously\n";
+    return usage();
+  }
+
+  if (binfile && binsize) {
+    std::cerr << '\n';
+    std::cerr << prog << ": options --binfile and --binsize cannot be set simultanously\n";
+    return usage();
+  }
+
+  return build_matrix(binoffset, binsize, binfile, chrsize_file, ifile, oprefix, format, bed_prefix, binadjust, matrix_format, step, whole_genome, chrA, chrB);
+}
diff --git a/bin/src/cutsite_trimming.cpp b/bin/src/cutsite_trimming.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef3fa869cd3bfe5f4e473908224cb42c2b99cbfe
--- /dev/null
+++ b/bin/src/cutsite_trimming.cpp
@@ -0,0 +1,153 @@
+// HiC-Pro
+// Copyright 2015 Institut Curie                               
+// Author(s): Nicolas Servant
+// Contact: nicolas.servant@curie.fr
+// This software is distributed without any guarantee under the terms of the BSD-3 licence
+
+// g++ -std=c++0x -o cutsite_trimming cutsite_trimming.cpp
+//./cutsite_trimming -fastq fastq -cutsite AGCTT
+
+
+#include <iostream>     // std::cout
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+#include <fstream>
+
+static const char* prog;
+
+static int usage(int ret=1)
+{
+  std::cerr << "usage: " << prog << " --fastq FASTQFILE --cutsite CUTSITE --out OUTFILE [--rmuntrim] \n";
+  std::cerr << "usage: " << prog << " --help\n";
+  return ret;
+}
+
+static int get_options(int argc, char* argv[], std::string& fastqFile,
+                       std::vector<std::string>& cutSites, std::string& output, bool& rmuntrim)
+{
+  prog = argv[0];
+  if (argc == 1){
+    exit(usage());
+  }
+  for (int ac = 1; ac < argc; ++ac) {
+    const char* opt = argv[ac];
+    if (*opt == '-') {
+      if (!strcmp(opt, "--fastq")) {
+        fastqFile = std::string(argv[++ac]);
+      } else if (!strcmp(opt, "--cutsite")) {
+
+        std::string cutSitesSequence;
+        cutSitesSequence = std::string(argv[++ac]);
+        size_t pos = cutSitesSequence.find(",");
+        size_t begin = 0;
+        while(pos != std::string::npos){
+          cutSites.push_back(cutSitesSequence.substr(begin, pos - begin));
+          begin = pos + 1;
+          pos = cutSitesSequence.find(",", begin + 1);
+        }
+        cutSites.push_back(cutSitesSequence.substr(begin, pos));
+
+      } 
+      else if (!strcmp(opt, "--out")) {
+        output = std::string(argv[++ac]);
+      }
+      else if (!strcmp(opt, "--rmuntrim")) {
+        rmuntrim = true;
+      }
+    }else {
+      std::cerr << prog << ": unknown option " << opt << std::endl;
+      return usage();
+    } 
+  }
+  return 0;
+}
+
+static int trim_fastq(std::string& fastqFile,
+                      std::vector<std::string>& cutSites,
+                      std::string& outFile, bool& rmuntrim)
+{
+
+  int trim_count=0;
+  std::string ID;
+  std::ifstream ifs (fastqFile);
+  std::ofstream ofs (outFile);
+
+  if (ifs.is_open()){
+    while (getline(ifs, ID)) {
+      std::string seq;
+      std::string dummy;
+      std::string qual;
+      
+      getline(ifs, seq);
+      getline(ifs, dummy);
+      getline(ifs, qual);
+
+      bool find_pos = false;
+      size_t pos = std::string::npos;
+      for (std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){
+        size_t tmp_pos = seq.find(*it);
+        if (tmp_pos != std::string::npos) {
+          // If find_pos is already true, there is more than one cut site in
+          // this read: keep the leftmost match position.
+          if (find_pos == true){
+            if(tmp_pos < pos) {
+              pos = tmp_pos;
+            }
+          } else {
+            find_pos = true;
+            pos = tmp_pos;
+          }
+        }
+      }
+      
+      if (pos != std::string::npos) {
+        trim_count++;
+        ofs << ID << '\n';
+        ofs << seq.substr(0, pos) << '\n';
+        ofs << "+\n";
+        ofs << qual.substr(0, pos) << '\n';
+      } else {
+        if (!rmuntrim){
+          ofs << ID << '\n';
+          ofs << seq << '\n';
+          ofs << "+\n";
+          ofs << qual << '\n';
+        }
+      }
+      find_pos = false;
+    }
+  }else{
+    std::cerr << "Error : Cannot open file : " << fastqFile;
+  }
+  return trim_count;
+}
+
+int main(int argc, char* argv[])
+{
+  
+  std::string fastqFile;
+  std::vector<std::string> cutSites;
+  std::string outFile;
+  bool rmuntrim = false;
+
+  int ret = get_options(argc, argv, fastqFile, cutSites, outFile, rmuntrim);
+  printf("##Fastq file: %s\n", fastqFile.c_str());
+  printf("##Restriction sites:\n");
+  for(std::vector<std::string>::iterator it = cutSites.begin(); it != cutSites.end(); ++it){
+    std::cout << *it << std::endl;
+  }
+  printf("##Output File: %s\n", outFile.c_str());
+
+  if (fastqFile.empty() || cutSites.size() == 0 || outFile.empty()){
+    usage();
+    exit(ret);
+  }
+
+  int trim_count=trim_fastq(fastqFile, cutSites, outFile, rmuntrim);
+  printf("\n##Trimmed reads: %d\n", trim_count);
+  return(0);
+ }
+
+
+
diff --git a/conf/base.config b/conf/base.config
index 0b2ea226659efbfec4eb0289d8880a6e9df3c8b6..157dd9548a110b9f2f710d3072850608fa9c2de5 100644
--- a/conf/base.config
+++ b/conf/base.config
@@ -10,8 +10,7 @@
  */
 
 process {
-
-  // TODO nf-core: Check the defaults for all processes
+  // nf-core: Check the defaults for all processes
   cpus = { check_max( 1 * task.attempt, 'cpus' ) }
   memory = { check_max( 7.GB * task.attempt, 'memory' ) }
   time = { check_max( 4.h * task.attempt, 'time' ) }
@@ -20,32 +19,28 @@ process {
   maxRetries = 1
   maxErrors = '-1'
 
-  // Process-specific resource requirements
-  // NOTE - Only one of the labels below are used in the fastqc process in the main script.
-  //        If possible, it would be nice to keep the same label naming convention when
-  //        adding in your processes.
-  // TODO nf-core: Customise requirements for specific processes.
-  // See https://www.nextflow.io/docs/latest/config.html#config-process-selectors
   withLabel:process_low {
-    cpus = { check_max( 2 * task.attempt, 'cpus' ) }
-    memory = { check_max( 14.GB * task.attempt, 'memory' ) }
+    cpus = { check_max( 1 * task.attempt, 'cpus' ) }
+    memory = { check_max( 4.GB * task.attempt, 'memory' ) }
     time = { check_max( 6.h * task.attempt, 'time' ) }
   }
   withLabel:process_medium {
-    cpus = { check_max( 6 * task.attempt, 'cpus' ) }
-    memory = { check_max( 42.GB * task.attempt, 'memory' ) }
+    cpus = { check_max( 4 * task.attempt, 'cpus' ) }
+    memory = { check_max( 8.GB * task.attempt, 'memory' ) }
     time = { check_max( 8.h * task.attempt, 'time' ) }
   }
   withLabel:process_high {
-    cpus = { check_max( 12 * task.attempt, 'cpus' ) }
-    memory = { check_max( 84.GB * task.attempt, 'memory' ) }
+    cpus = { check_max( 8 * task.attempt, 'cpus' ) }
+    memory = { check_max( 64.GB * task.attempt, 'memory' ) }
     time = { check_max( 10.h * task.attempt, 'time' ) }
   }
   withLabel:process_long {
     time = { check_max( 20.h * task.attempt, 'time' ) }
   }
+  withLabel:process_highmem {
+    memory = { check_max( 12.GB * task.attempt, 'memory' ) }
+  }
   withName:get_software_versions {
     cache = false
   }
-  
 }
diff --git a/conf/hicpro.config b/conf/hicpro.config
new file mode 100644
index 0000000000000000000000000000000000000000..cd0cf0b5a54f860312f49ac193802d53964ce686
--- /dev/null
+++ b/conf/hicpro.config
@@ -0,0 +1,38 @@
+/*
+ * -------------------------------------------------
+ *  Nextflow config file for Genomes paths
+ * -------------------------------------------------
+ * Defines reference genomes
+ * Can be used by any config that customises the base
+ * path using $params.genomes_base / --genomes_base
+ */
+
+params {
+
+       // Alignment options
+       bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+       bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+       min_mapq = 10
+
+       // Digestion Hi-C
+       restriction_site = 'A^AGCTT'
+       ligation_site = 'AAGCTAGCTT'
+       min_restriction_fragment_size = 0
+       max_restriction_fragment_size = 0
+       min_insert_size = 0
+       max_insert_size = 0
+
+       // Hi-C Processing
+       min_cis_dist = 0
+       rm_singleton = true
+       rm_multi = true
+       rm_dup = true
+
+       bin_size = '1000000,500000'
+
+       ice_max_iter = 100
+       ice_filer_low_count_perc = 0.02
+       ice_filer_high_count_perc =  0
+       ice_eps = 0.1
+}
+
diff --git a/conf/igenomes.config b/conf/igenomes.config
index caeafceb25ed1ab89f906bb597ff15ef30f3a3e7..1ba2588593f4e1940dc0bf3a3380f0114a71684e 100644
--- a/conf/igenomes.config
+++ b/conf/igenomes.config
@@ -12,410 +12,151 @@ params {
   genomes {
     'GRCh37' {
       fasta       = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Homo_sapiens/Ensembl/GRCh37/Annotation/README.txt"
-      mito_name   = "MT"
-      macs_gsize  = "2.7e9"
-      blacklist   = "${baseDir}/assets/blacklists/GRCh37-blacklist.bed"
     }
     'GRCh38' {
       fasta       = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Homo_sapiens/NCBI/GRCh38/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
-      macs_gsize  = "2.7e9"
-      blacklist   = "${baseDir}/assets/blacklists/hg38-blacklist.bed"
     }
     'GRCm38' {
       fasta       = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Mus_musculus/Ensembl/GRCm38/Annotation/README.txt"
-      mito_name   = "MT"
-      macs_gsize  = "1.87e9"
-      blacklist   = "${baseDir}/assets/blacklists/GRCm38-blacklist.bed"
     }
     'TAIR10' {
       fasta       = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Arabidopsis_thaliana/Ensembl/TAIR10/Annotation/README.txt"
-      mito_name   = "Mt"
     }
     'EB2' {
       fasta       = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Bacillus_subtilis_168/Ensembl/EB2/Annotation/README.txt"
     }
     'UMD3.1' {
       fasta       = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Bos_taurus/Ensembl/UMD3.1/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'WBcel235' {
       fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Caenorhabditis_elegans/Ensembl/WBcel235/Annotation/Genes/genes.bed"
-      mito_name   = "MtDNA"
-      macs_gsize  = "9e7"
     }
     'CanFam3.1' {
       fasta       = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Canis_familiaris/Ensembl/CanFam3.1/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'GRCz10' {
       fasta       = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Danio_rerio/Ensembl/GRCz10/Annotation/Genes/genes.bed"
-      mito_name   = "MT"
     }
     'BDGP6' {
       fasta       = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Drosophila_melanogaster/Ensembl/BDGP6/Annotation/Genes/genes.bed"
-      mito_name   = "M"
-      macs_gsize  = "1.2e8"
     }
     'EquCab2' {
       fasta       = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Equus_caballus/Ensembl/EquCab2/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'EB1' {
       fasta       = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Escherichia_coli_K_12_DH10B/Ensembl/EB1/Annotation/README.txt"
     }
     'Galgal4' {
       fasta       = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Gallus_gallus/Ensembl/Galgal4/Annotation/Genes/genes.bed"
-      mito_name   = "MT"
     }
     'Gm01' {
       fasta       = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Glycine_max/Ensembl/Gm01/Annotation/README.txt"
     }
     'Mmul_1' {
       fasta       = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Macaca_mulatta/Ensembl/Mmul_1/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'IRGSP-1.0' {
       fasta       = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Oryza_sativa_japonica/Ensembl/IRGSP-1.0/Annotation/Genes/genes.bed"
-      mito_name   = "Mt"
     }
     'CHIMP2.1.4' {
       fasta       = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Pan_troglodytes/Ensembl/CHIMP2.1.4/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'Rnor_6.0' {
       fasta       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Rattus_norvegicus/Ensembl/Rnor_6.0/Annotation/Genes/genes.bed"
-      mito_name   = "MT"
     }
     'R64-1-1' {
       fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Saccharomyces_cerevisiae/Ensembl/R64-1-1/Annotation/Genes/genes.bed"
-      mito_name   = "MT"
-      macs_gsize  = "1.2e7"
     }
     'EF2' {
       fasta       = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Schizosaccharomyces_pombe/Ensembl/EF2/Annotation/README.txt"
-      mito_name   = "MT"
-      macs_gsize  = "1.21e7"
     }
     'Sbi1' {
       fasta       = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Sorghum_bicolor/Ensembl/Sbi1/Annotation/README.txt"
     }
     'Sscrofa10.2' {
       fasta       = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Sus_scrofa/Ensembl/Sscrofa10.2/Annotation/README.txt"
-      mito_name   = "MT"
     }
     'AGPv3' {
       fasta       = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Zea_mays/Ensembl/AGPv3/Annotation/Genes/genes.bed"
-      mito_name   = "Mt"
     }
     'hg38' {
       fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
-      macs_gsize  = "2.7e9"
-      blacklist   = "${baseDir}/assets/blacklists/hg38-blacklist.bed"
     }
     'hg19' {
       fasta       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Homo_sapiens/UCSC/hg19/Annotation/README.txt"
-      mito_name   = "chrM"
-      macs_gsize  = "2.7e9"
-      blacklist   = "${baseDir}/assets/blacklists/hg19-blacklist.bed"
     }
     'mm10' {
       fasta       = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Mus_musculus/UCSC/mm10/Annotation/README.txt"
-      mito_name   = "chrM"
-      macs_gsize  = "1.87e9"
-      blacklist   = "${baseDir}/assets/blacklists/mm10-blacklist.bed"
     }
     'bosTau8' {
       fasta       = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Bos_taurus/UCSC/bosTau8/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
     }
     'ce10' {
       fasta       = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Caenorhabditis_elegans/UCSC/ce10/Annotation/README.txt"
-      mito_name   = "chrM"
-      macs_gsize  = "9e7"
     }
     'canFam3' {
       fasta       = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Canis_familiaris/UCSC/canFam3/Annotation/README.txt"
-      mito_name   = "chrM"
     }
     'danRer10' {
       fasta       = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Danio_rerio/UCSC/danRer10/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
-      macs_gsize  = "1.37e9"
     }
     'dm6' {
       fasta       = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Drosophila_melanogaster/UCSC/dm6/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
-      macs_gsize  = "1.2e8"
     }
     'equCab2' {
       fasta       = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Equus_caballus/UCSC/equCab2/Annotation/README.txt"
-      mito_name   = "chrM"
     }
     'galGal4' {
       fasta       = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Gallus_gallus/UCSC/galGal4/Annotation/README.txt"
-      mito_name   = "chrM"
     }
     'panTro4' {
       fasta       = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Pan_troglodytes/UCSC/panTro4/Annotation/README.txt"
-      mito_name   = "chrM"
     }
     'rn6' {
       fasta       = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Rattus_norvegicus/UCSC/rn6/Annotation/Genes/genes.bed"
-      mito_name   = "chrM"
     }
     'sacCer3' {
       fasta       = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Sequence/BismarkIndex/"
-      readme      = "${params.igenomes_base}/Saccharomyces_cerevisiae/UCSC/sacCer3/Annotation/README.txt"
-      mito_name   = "chrM"
-      macs_gsize  = "1.2e7"
     }
     'susScr3' {
       fasta       = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/WholeGenomeFasta/genome.fa"
-      bwa         = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BWAIndex/genome.fa"
       bowtie2     = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/Bowtie2Index/"
-      star        = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/STARIndex/"
-      bismark     = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Sequence/BismarkIndex/"
-      gtf         = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.gtf"
-      bed12       = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/Genes/genes.bed"
-      readme      = "${params.igenomes_base}/Sus_scrofa/UCSC/susScr3/Annotation/README.txt"
-      mito_name   = "chrM"
     }
   }
 }
diff --git a/conf/test.config b/conf/test.config
index 02f48cb2238a0a51c70442ee2d0571f364d1218c..2ab8e57eda3d8fddb90c6d7ed3ddf2c0fd0672ca 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -8,19 +8,35 @@
  */
 
 params {
-  config_profile_name = 'Test profile'
+
+  config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
   config_profile_description = 'Minimal test dataset to check pipeline function'
-  // Limit resources so that this can run on GitHub Actions
+
+  // Limit resources so that this can run on Travis
   max_cpus = 2
-  max_memory = 6.GB
-  max_time = 48.h
+  max_memory = 4.GB
+  max_time = 1.h
 
   // Input data
-  // TODO nf-core: Specify the paths to your test data on nf-core/test-datasets
-  // TODO nf-core: Give any required params for the test so that command line flags are not needed
-  single_end = false
   input_paths = [
-    ['Testdata', ['https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R1.tiny.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R2.tiny.fastq.gz']],
-    ['SRR389222', ['https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub2.fastq.gz']]
-  ]
+    ['SRR4292758_00', ['https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R2.fastq.gz']]
+   ]
+
+  // Annotations
+  fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
+  restriction_site = 'A^AGCTT'
+  ligation_site = 'AAGCTAGCTT'
+  
+  min_mapq = 2
+  rm_dup = true
+  rm_singleton = true
+  rm_multi = true
+
+  min_restriction_fragment_size = 100
+  max_restriction_fragment_size = 100000
+  min_insert_size = 100
+  max_insert_size = 600
+  
+  // Options
+  skip_cool = true
 }
diff --git a/conf/test_full.config b/conf/test_full.config
index 921372eec05fe6147c540a39463c54f0ebf09bce..47d31760585c66025666f112dcd03a23faeac543 100644
--- a/conf/test_full.config
+++ b/conf/test_full.config
@@ -12,11 +12,25 @@ params {
   config_profile_description = 'Full test dataset to check pipeline function'
 
   // Input data for full size test
-  // TODO nf-core: Specify the paths to your full test data ( on nf-core/test-datasets or directly in repositories, e.g. SRA)
-  // TODO nf-core: Give any required params for the test so that command line flags are not needed
-  single_end = false
   input_paths = [
-    ['Testdata', ['https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R1.tiny.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/exoseq/testdata/Testdata_R2.tiny.fastq.gz']],
-    ['SRR389222', ['https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/methylseq/testdata/SRR389222_sub2.fastq.gz']]
-  ]
+    ['SRR4292758_00', ['https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R2.fastq.gz']]
+   ]
+
+  // Annotations
+  fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
+  restriction_site = 'A^AGCTT'
+  ligation_site = 'AAGCTAGCTT'
+  
+  min_mapq = 2
+  rm_dup = true
+  rm_singleton = true
+  rm_multi = true
+
+  min_restriction_fragment_size = 100
+  max_restriction_fragment_size = 100000
+  min_insert_size = 100
+  max_insert_size = 600
+  
+  // Options
+  skip_cool = true
 }
diff --git a/docs/README.md b/docs/README.md
index a6889549c7f27bda0aed81947685713781fe2d1b..bdbc92abc939ff716f3fcaba1b5069be471c9049 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -3,8 +3,11 @@
 The nf-core/hic documentation is split into the following pages:
 
 * [Usage](usage.md)
-  * An overview of how the pipeline works, how to run it and a description of all of the different command-line flags.
+  * An overview of how the pipeline works, how to run it and a
+  description of all of the different command-line flags.
 * [Output](output.md)
-  * An overview of the different results produced by the pipeline and how to interpret them.
+  * An overview of the different results produced by the pipeline
+  and how to interpret them.
 
-You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re)
+You can find a lot more documentation about installing, configuring
+and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re)
diff --git a/docs/images/nfcore-hic_logo.png b/docs/images/nfcore-hic_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..d75e44b92cc77b61d1cc79747cfa390101784a03
Binary files /dev/null and b/docs/images/nfcore-hic_logo.png differ
diff --git a/docs/images/nfcore-hic_logo.svg b/docs/images/nfcore-hic_logo.svg
new file mode 100644
index 0000000000000000000000000000000000000000..7a2086987e69e1529baf40fe3ea526c174b85ac1
--- /dev/null
+++ b/docs/images/nfcore-hic_logo.svg
@@ -0,0 +1,205 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   enable-background="new 0 0 1150.9 517"
+   version="1.1"
+   viewBox="0 0 1456.7841 522.44342"
+   xml:space="preserve"
+   id="svg2"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="EmptyName_logo.svg"
+   width="1456.7842"
+   height="522.44342"><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1920"
+     inkscape:window-height="1015"
+     id="namedview75"
+     showgrid="false"
+     inkscape:zoom="0.35757767"
+     inkscape:cx="253.20897"
+     inkscape:cy="13.773735"
+     inkscape:window-x="1920"
+     inkscape:window-y="724"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer3"
+     fit-margin-left="62.25"
+     fit-margin-right="62.25"
+     fit-margin-top="62.25"
+     fit-margin-bottom="62.25" /><metadata
+     id="metadata4"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs6"><clipPath
+       id="e"><path
+         d="m 280.17,136.33 -21.5,-21.584 61,0 0,21.584 -39.5,0 z"
+         id="path9"
+         inkscape:connector-curvature="0" /></clipPath><linearGradient
+       id="f"
+       x2="1"
+       gradientTransform="matrix(37.935819,29.638391,-29.638391,37.935819,295.72019,166.19562)"
+       gradientUnits="userSpaceOnUse"><stop
+         stop-color="#0c542a"
+         offset="0"
+         id="stop12" /><stop
+         stop-color="#0c542a"
+         offset=".21472"
+         id="stop14" /><stop
+         stop-color="#25af64"
+         offset=".57995"
+         id="stop16" /><stop
+         stop-color="#25af64"
+         offset=".84663"
+         id="stop18" /><stop
+         stop-color="#25af64"
+         offset="1"
+         id="stop20" /></linearGradient></defs><style
+     type="text/css"
+     id="style22">
+	.st0{fill:#24AF63;}
+	.st1{font-family:'Maven Pro';}
+	.st1{font-weight:'bold';}
+	.st2{font-size:209.8672px;}
+	.st3{fill:#21AF62;}
+	.st4{fill:#ECDC86;}
+	.st5{fill:#A0918F;}
+	.st6{fill:#3F2B29;}
+	.st7{fill:#396E35;}
+	.st8{fill:url(#d);}
+</style><linearGradient
+     id="d"
+     x1="295.45999"
+     x2="333.34"
+     y1="150.75"
+     y2="180.35001"
+     gradientUnits="userSpaceOnUse"><stop
+       stop-color="#0D552B"
+       offset=".2147"
+       id="stop41" /><stop
+       stop-color="#176837"
+       offset=".311"
+       id="stop43" /><stop
+       stop-color="#1F8448"
+       offset=".4609"
+       id="stop45" /><stop
+       stop-color="#239A56"
+       offset=".604"
+       id="stop47" /><stop
+       stop-color="#24A860"
+       offset=".7361"
+       id="stop49" /><stop
+       stop-color="#25AF64"
+       offset=".8466"
+       id="stop51" /></linearGradient><g
+     inkscape:groupmode="layer"
+     id="layer2"
+     inkscape:label="Icon"
+     style="display:inline"
+     transform="translate(5.3761467,0)"><g
+       id="g4209"><path
+         style="fill:#24af63"
+         inkscape:connector-curvature="0"
+         id="path24"
+         d="m 1084.1,163.75 0,3.6 c -0.1,0 -0.1,0 -0.2,0.1 l -1.8,-1.5 c -4,-3.4 -8.3,-6.4 -13.1,-8.8 -0.8,-0.4 -1.6,-0.9 -2.5,-1.1 -0.1,-0.1 -0.2,-0.2 -0.3,-0.2 -1.8,-0.7 -3.6,-1.3 -5.5,-1.8 -4.1,-0.9 -8.2,-1 -12.3,-0.2 -5.3,1.1 -10,3.4 -14.5,6.4 -4.4,3 -8.4,6.5 -12.1,10.2 -0.7,0.7 -0.7,0.7 -1.1,-0.2 -2,-4.1 -4.2,-8.1 -6.9,-11.8 -2.1,-2.8 -4.4,-5.4 -7.4,-7.2 -3,-1.9 -6.3,-2.6 -9.8,-1.7 -4.3,1 -7.8,3.6 -11.1,6.4 -2,1.5 -3.8,3.3 -5.6,5 -1.7,1.5 -3.3,3 -5,4.5 -0.3,0.3 -0.5,0.3 -0.8,0 -1.7,-1.8 -3.5,-3.4 -5.6,-4.5 -3.1,-1.7 -6.3,-1.9 -9.6,-0.8 -2.8,0.9 -5.2,2.4 -7.7,4 -1,0.6 -1.9,1.3 -2.9,1.8 l 0,-0.2 c 0.1,-0.2 0.1,-0.4 0.1,-0.6 0.2,-4.4 0.5,-8.9 1.2,-13.3 1,-6.1 2.5,-12 5.2,-17.5 2,-4.1 4.7,-7.9 8.1,-11 4.5,-4.1 9.8,-6.7 15.6,-8.3 6.3,-1.8 12.7,-2.6 19.2,-2.9 2.6,-0.1 5.1,-0.2 7.7,-0.3 1.3,0.5 2.6,0.8 3.9,1.2 1.9,0.6 3.8,1.2 5.7,1.7 1,0.4 1.9,0.7 2.9,1.1 3.7,1.3 7.3,3 10.4,5.5 0.8,0.6 1.6,1.3 2.4,2 -0.2,-0.6 -0.4,-1.1 -0.6,-1.7 -1.4,-3.7 -3.5,-6.7 -6.9,-8.8 -1.4,-0.9 -2.9,-1.5 -4.4,-2.3 0.1,0 0.3,0 0.4,-0.1 4.5,-0.8 9.1,-1.2 13.7,-1.4 3.9,-0.2 7.9,-0.1 11.8,0.3 4.6,0.5 9.1,1.4 13.4,3 6.4,2.4 11.9,6.1 16.2,11.5 3.7,4.7 6.1,10.1 7.6,15.9 1.5,5.7 2.1,11.6 2.3,17.5 -0.1,2.1 -0.1,4.3 -0.1,6.5 z"
+         class="st0" /><path
+         style="fill:#ecdc86"
+         inkscape:connector-curvature="0"
+         id="path26"
+         d="m 1084.1,157.15 0.1,0 0,6.6 -0.1,0 0,-6.6 z"
+         class="st4" /><path
+         style="fill:#a0918f"
+         inkscape:connector-curvature="0"
+         id="path28"
+         d="m 1047.6,62.25 0,0.1 -4.5,0 0,-0.1 4.5,0 z"
+         class="st5" /><path
+         style="fill:#24af63"
+         inkscape:connector-curvature="0"
+         id="path30"
+         d="m 1050.5,250.65 c 2.5,-1 4.9,-2.3 7.3,-3.6 2.8,-1.7 5.4,-3.5 8,-5.4 2.2,-1.6 4.3,-3.3 6.4,-5.1 l 3.6,-3 c 0.2,-0.2 0.2,-0.1 0.3,0.1 0.4,1.6 0.7,3.3 1.1,5 0.5,2.3 0.8,4.6 1.1,6.9 0.3,2.7 0.4,5.3 0.2,8 -0.2,3.3 -0.8,6.6 -2,9.7 -0.7,1.9 -1.6,3.7 -2.7,5.4 -1.4,2.2 -3,4.2 -5,5.9 -2.3,2.1 -4.9,3.9 -7.7,5.4 -3.7,2.1 -7.7,3.6 -11.8,4.8 -3.9,1.2 -7.9,2 -11.9,2.7 -1.1,0.2 -2.2,0.4 -3.3,0.4 -2.3,-0.1 -4.6,-0.6 -6.8,-1.4 -3.3,-1.3 -6.2,-3.3 -9.5,-4.8 -1.8,-0.8 -3.6,-1.4 -5.5,-1.5 -2.5,-0.2 -4.6,0.7 -6.4,2.4 l -3.9,3.9 c -2.2,2.2 -4.8,3.7 -7.9,4.2 -2.1,0.3 -4.1,0.2 -6.2,-0.1 -2.9,-0.4 -5.7,-1.1 -8.4,-1.9 -4,-1.3 -7.7,-3.1 -11.1,-5.7 -3.2,-2.4 -5.7,-5.4 -7.8,-8.8 -2.1,-3.5 -3.3,-7.2 -4.2,-11.1 -0.4,-1.7 -0.6,-3.5 -0.8,-5.2 -0.3,-2.5 -0.4,-4.9 -0.3,-7.4 0.1,-3.5 0.4,-6.9 0.9,-10.4 0.4,0.4 0.8,0.7 1.1,1 2.2,2 4.7,3.8 7.3,5.4 2.9,1.7 6.1,3.1 9.4,4 2.2,0.6 4.5,1 6.8,1.1 1.9,0.2 3.8,0.2 5.7,0.1 2.2,-0.1 4.5,-0.3 6.7,-0.9 0.3,0 0.6,0 0.8,-0.1 2,-0.4 4,-0.9 6,-1.5 2.3,-0.7 4.5,-1.4 6.7,-2.2 2.1,-0.8 4.3,-1.7 6.4,-2.6 0.6,-0.3 1,-0.2 1.5,0.2 3.5,2.7 7.3,5 11.4,6.7 4.6,1.8 9.3,2.7 14.2,2.3 3.6,-0.7 7,-1.6 10.3,-2.9 z"
+         class="st0" /><path
+         style="fill:#ecdc86"
+         inkscape:connector-curvature="0"
+         id="path32"
+         d="m 1050.5,250.65 c -3.3,1.3 -6.7,2.2 -10.2,2.5 -4.9,0.4 -9.6,-0.5 -14.2,-2.3 -4.1,-1.6 -7.9,-3.9 -11.4,-6.7 -0.5,-0.4 -0.9,-0.5 -1.5,-0.2 -2.1,0.9 -4.2,1.8 -6.4,2.6 -2.2,0.8 -4.4,1.6 -6.7,2.2 -2,0.6 -4,1 -6,1.5 -0.3,0.1 -0.6,0.1 -0.8,0.1 0.7,-0.8 1.4,-1.6 2.1,-2.4 2.8,-3.2 4.8,-6.9 5.9,-11.1 1.6,-5.6 3.2,-11.3 4.6,-17 1,-4.2 1.8,-8.4 2.4,-12.7 0.4,-3.1 1,-14.9 0.8,-17.7 -0.5,-8.6 -2.4,-16.8 -5.9,-24.7 -2.1,-4.7 -5.7,-7.9 -10.7,-9.2 -2.2,-0.6 -4.4,-0.4 -6.5,0.3 -0.2,0.1 -0.3,0.2 -0.5,0.1 3.3,-2.8 6.7,-5.4 11.1,-6.4 3.5,-0.8 6.8,-0.2 9.8,1.7 3,1.9 5.3,4.4 7.4,7.2 2.7,3.7 4.9,7.7 6.9,11.8 0.4,0.9 0.4,0.9 1.1,0.2 3.7,-3.8 7.7,-7.3 12.1,-10.2 4.4,-3 9.2,-5.3 14.5,-6.4 4.1,-0.8 8.2,-0.7 12.3,0.2 1.9,0.4 3.7,1 5.5,1.8 0.1,0.1 0.3,0.1 0.3,0.2 -5.3,0.1 -9.8,2.1 -13.9,5.2 -2,1.5 -3.8,3.2 -5.2,5.3 -1.1,1.7 -2.1,3.6 -2.9,5.5 -1.8,3.8 -3.3,7.8 -4.4,11.9 -0.9,3.5 -1.5,7.1 -1.8,10.7 -0.2,2.8 -0.3,5.6 -0.2,8.4 0.1,3.4 0.5,6.8 0.9,10.3 0.7,5.7 1.7,11.4 2.7,17.1 0.5,3.1 0.9,6.3 1.5,9.5 0.7,4.6 3.3,8 7,10.6 0,-0.1 0.2,0 0.3,0.1 z"
+         class="st4" /><path
+         style="fill:#3f2b29"
+         inkscape:connector-curvature="0"
+         id="path34"
+         d="m 1043.1,62.35 4.5,0 c 3.6,0.2 7.2,0.8 10.6,2 2.7,0.9 3.3,2.7 1.7,5 -1.1,1.6 -2.7,2.8 -4.4,3.9 -2.1,1.4 -4.4,2.6 -6.9,3.5 -2.5,1 -4.9,0 -6.5,-2.5 -0.5,-0.8 -0.9,-1.6 -1.1,-2.5 -0.1,-0.3 -0.2,-0.4 -0.5,-0.4 -5.6,-1 -10.6,0.3 -14.7,4.3 -3.4,3.2 -5.4,7.3 -6.8,11.7 -1.3,4 -1.9,8 -2.1,12.2 -0.2,3.7 0.1,7.4 0.6,11 0.1,0.6 0.3,1.2 0.3,1.9 0.1,0.8 -0.2,1.5 -0.8,1.9 -0.7,0.5 -1.5,0.4 -2.3,0.4 -1.9,-0.6 -3.8,-1.2 -5.7,-1.7 l 0,-1.3 c 0,-2 0,-3.9 0.1,-5.9 0.4,-7.7 1.6,-15.3 4.6,-22.5 2.2,-5.4 5.4,-10.1 9.9,-13.8 3.7,-3.1 7.9,-5.1 12.6,-6.2 2.4,-0.6 4.6,-0.9 6.9,-1 z"
+         class="st6" /><path
+         style="fill:#396e35"
+         inkscape:connector-curvature="0"
+         id="path36"
+         d="m 1014.8,114.65 c 0.8,0 1.6,0.1 2.3,-0.4 0.7,-0.5 0.9,-1.2 0.8,-1.9 -0.1,-0.6 -0.2,-1.3 -0.3,-1.9 0.4,0 0.7,-0.1 1.1,-0.1 1.4,0.8 2.9,1.5 4.4,2.3 3.4,2.1 5.5,5.1 6.9,8.8 0.2,0.6 0.4,1.1 0.6,1.7 -0.8,-0.7 -1.6,-1.4 -2.4,-2 -3.2,-2.4 -6.7,-4.1 -10.4,-5.5 -1.1,-0.3 -2,-0.6 -3,-1 z"
+         class="st7" /><path
+         style="fill:#396e35"
+         inkscape:connector-curvature="0"
+         id="path38"
+         d="m 1009.1,111.65 0,1.3 c -1.3,-0.4 -2.6,-0.7 -3.9,-1.2 1.4,-0.1 2.7,-0.1 3.9,-0.1 z"
+         class="st7" /></g></g><g
+     inkscape:groupmode="layer"
+     id="layer3"
+     inkscape:label="Text"
+     style="display:inline"
+     transform="translate(5.3761467,0)"><text
+       x="48.898899"
+       y="241.24541"
+       font-size="209.87px"
+       font-weight="bold"
+       id="text53"
+       style="font-weight:bold;font-size:209.86999512px;font-family:'Maven Pro'"><tspan
+         class="st0 st1 st2"
+         x="48.898899"
+         y="241.24541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan55"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro';fill:#24af63">nf-<tspan
+   id="tspan57"
+   style="fill:#000000" /></tspan></text>
+<text
+       x="357.14139"
+       y="241.24541"
+       font-size="209.87px"
+       font-weight="bold"
+       id="text69"
+       style="font-weight:bold;font-size:209.86999512px;font-family:'Maven Pro'"><tspan
+         class="st0 st1 st2"
+         x="357.14139"
+         y="241.24541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan71"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro';fill:#24af63"><tspan
+           id="tspan73"
+           style="fill:#000000">core/</tspan></tspan></text>
+<text
+       x="-260.05042"
+       y="457.04541"
+       font-weight="bold"
+       id="text59"
+       style="font-weight:bold;font-family:'Maven Pro'"><tspan
+         class="st1 st2"
+         x="47.849564"
+         y="457.04541"
+         font-size="209.87px"
+         font-weight="bold"
+         id="tspan61"
+         style="font-weight:bold;font-size:209.86720276px;font-family:'Maven Pro'">hic</tspan></text>
+<path
+       d="m 300.43725,166.1155 -21.53224,21.61638 61.0915,0 0,-21.61638 -39.55926,0 z"
+       id="path67"
+       inkscape:connector-curvature="0"
+       style="fill:url(#f)" /></g></svg>
\ No newline at end of file
diff --git a/docs/output.md b/docs/output.md
index 4a7372e92d17cf7897c93fb403d6bd41c2bc2f29..895a4f2a16d75e7ca0dfb21c13d0cccc7ea3a322 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -8,41 +8,190 @@
 
 This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline.
 
-The directories listed below will be created in the results directory after the pipeline has finished. All paths are relative to the top-level results directory.
-
-<!-- TODO nf-core: Write this documentation describing your workflow's output -->
+The directories listed below will be created in the results directory
+after the pipeline has finished. All paths are relative to the top-level
+results directory.
 
 ## Pipeline overview
 
 The pipeline is built using [Nextflow](https://www.nextflow.io/)
 and processes data using the following steps:
 
-* [FastQC](#fastqc) - Read quality control
-* [MultiQC](#multiqc) - Aggregate report describing results from the whole pipeline
-* [Pipeline information](#pipeline-information) - Report metrics generated during the workflow execution
-
-## FastQC
-
-[FastQC](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/) gives general quality metrics about your sequenced reads. It provides information about the quality score distribution across your reads, per base sequence content (%A/T/G/C), adapter contamination and overrepresented sequences.
-
-For further reading and documentation see the [FastQC help pages](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/).
-
-**Output files:**
-
-* `fastqc/`
-  * `*_fastqc.html`: FastQC report containing quality metrics for your untrimmed raw fastq files.
-* `fastqc/zips/`
-  * `*_fastqc.zip`: Zip archive containing the FastQC report, tab-delimited data file and plot images.
-
-> **NB:** The FastQC plots displayed in the MultiQC report shows _untrimmed_ reads. They may contain adapter sequence and potentially regions with low quality.
+* [Reads alignment](#reads-alignment)
+* [Valid pairs detection](#valid-pairs-detection)
+* [Duplicates removal](#duplicates-removal)
+* [Contact maps](#contact-maps)
+* [MultiQC](#multiqc) - aggregate report and quality controls, describing
+results of the whole pipeline
+* [Export](#export) - additional export for compatibility with downstream
+analysis tools and visualisation
+
+The current version is mainly based on the
+[HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
+For details about the workflow, see
+[Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
+
+## Reads alignment
+
+Using Hi-C data, each read mate has to be independently aligned on the
+reference genome.
+The current workflow implements a two-step mapping strategy. First, the reads
+are aligned using an end-to-end aligner.
+Second, reads spanning the ligation junction are trimmed from their 3' end,
+and aligned back on the genome.
+Aligned reads for both fragment mates are then paired in a single paired-end
+BAM file.
+Singletons are discarded, and multi-hits are filtered according to the
+configuration parameters (`--rm-multi`).
+Note that if the `--dnase` mode is activated, HiC-Pro will skip the second
+mapping step.
+
+**Output directory: `results/mapping`**
+
+* `*bwt2pairs.bam` - final BAM file with aligned paired data
+* `*.pairstat` - mapping statistics
+
+If `--saveAlignedIntermediates` is specified, additional mapping file results
+are available:
+
+* `*.bam` - Aligned reads (R1 and R2) from end-to-end alignment
+* `*_unmap.fastq` - Unmapped reads after end-to-end alignment
+* `*_trimmed.fastq` - Trimmed reads after end-to-end alignment
+* `*_trimmed.bam` - Alignment of trimmed reads
+* `*bwt2merged.bam` - merged BAM file after the two-steps alignment
+* `*.mapstat` - mapping statistics per read mate
+
+Usually, a high fraction of reads is expected to be aligned on the genome
+(80-90%). Among them, we usually observed a few percent (around 10%) of step 2
+aligned reads. Those reads are chimeric fragments for which we detect a
+ligation junction. An abnormal level of chimeric reads can reflect a ligation
+issue during the library preparation.
+The fraction of singleton or multi-hits depends on the genome complexity and
+the fraction of unmapped reads. The fraction of singleton is usually close to
+the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the
+same pair were unmapped.
+
+## Valid pairs detection
+
+Each aligned read can be assigned to one restriction fragment according to the
+reference genome and the digestion protocol.
+
+Invalid pairs are classified as follows:
+
+* Dangling end, i.e. unligated fragments (both reads mapped on the same
+restriction fragment)
+* Self circles, i.e. fragments ligated on themselves (both reads mapped on the
+same restriction fragment in inverted orientation)
+* Religation, i.e. ligation of juxtaposed fragments
+* Filtered pairs, i.e. any pairs that do not match the filtering criteria on
+inserts size, restriction fragments size
+* Dumped pairs, i.e. any pairs for which we were not able to reconstruct the
+ligation product.
+
+Only valid pairs involving two different restriction fragments are used to
+build the contact maps.
+Duplicated valid pairs associated to PCR artefacts are discarded
+(see `--rm_dup`).
+
+In case of Hi-C protocols that do not require a restriction enzyme such as
+DNase Hi-C or micro Hi-C, the assignment to a restriction fragment is not possible
+(see `--dnase`).
+Short range interactions that are likely to be spurious ligation products
+can thus be discarded using the `--min_cis_dist` parameter.
+
+* `*.validPairs` - List of valid ligation products
+* `*.DEpairs` - List of dangling-end products
+* `*.SCPairs` - List of self-circle products
+* `*.REPairs` - List of religation products
+* `*.FiltPairs` - List of filtered pairs
+* `*RSstat` - Statistics of the number of read pairs falling in each category
+
+The validPairs are stored using a simple tab-delimited text format:
+
+```bash
+read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 /
+strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1
+/ mapping qual R2 [/ allele_specific_tag]
+```
+
+The ligation efficiency can be assessed using the filtering of valid and
+invalid pairs. As the ligation is a random process, 25% of each valid ligation
+class is expected. In the same way, a high level of dangling-end or self-circle
+read pairs is associated with a low quality experiment, and reveals a problem
+during the digestion, fill-in or ligation steps.
+
+In the context of Hi-C protocols without a restriction enzyme, this analysis
+step is skipped. The aligned pairs are therefore directly used to generate the
+contact maps. A filter on short range contacts (typically <1kb) is
+recommended as these pairs are likely to be self-ligation products.
+
+## Duplicates removal
+
+Note that validPairs files are generated per read chunk.
+These files are then merged in the allValidPairs file, and duplicates are
+removed if the `--rm_dup` parameter is used.
+
+* `*allValidPairs` - combined valid pairs from all read chunks
+* `*mergestat` - statistics about duplicates removal and valid pairs information
+
+Additional quality controls such as fragment size distribution can be extracted
+from the list of valid interaction products.
+We usually expect to see a distribution centered around 300 bp which corresponds
+to the paired-end insert size commonly used.
+The fraction of duplicates is also presented. A high level of duplication
+indicates a poor molecular complexity and a potential PCR bias.
+Finally, an important metric is to look at the fraction of intra and
+inter-chromosomal interactions, as well as long range (>20kb) versus short
+range (<20kb) intra-chromosomal interactions.
+
+## Contact maps
+
+Intra and inter-chromosomal contact maps are built for all specified resolutions.
+The genome is split into bins of equal size. Each valid interaction is
+associated with the genomic bins to generate the raw maps.
+In addition, Hi-C data can contain several sources of biases which have to be
+corrected.
+The current workflow uses the [iced](https://github.com/hiclib/iced) and
+[Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286)
+python package which proposes a fast implementation of the original ICE
+normalization algorithm (Imakaev et al. 2012), making the assumption of equal
+visibility of each fragment.
+
+* `*.matrix` - genome-wide contact maps
+* `*_iced.matrix` - genome-wide iced contact maps
+
+The contact maps are generated for all specified resolutions
+(see the `--bin_size` argument).
+A contact map is defined by:
+
+* A list of genomic intervals related to the specified resolution (BED format).
+* A matrix, stored as standard triplet sparse format (i.e. list format).
+
+Based on the observation that a contact map is symmetric and usually sparse,
+only non-zero values are stored for half of the matrix. The user can specify
+whether the 'upper', 'lower' or 'complete' matrix has to be stored. The 'asis'
+option allows storing the contacts as they are observed from the valid pairs
+files.
+
+```bash
+   A   B   10
+   A   C   23
+   B   C   24
+   (...)
+```
+
+This format is memory efficient, and is compatible with several software for
+downstream analysis.
 
 ## MultiQC
 
-[MultiQC](http://multiqc.info) is a visualization tool that generates a single HTML report summarizing all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in the report data directory.
-
-The pipeline has special steps which also allow the software versions to be reported in the MultiQC output for future traceability.
+[MultiQC](http://multiqc.info) is a visualisation tool that generates a single
+HTML report summarising all samples in your project. Most of the pipeline QC
+results are visualised in the report and further statistics are available
+within the report data directory.
 
-For more information about how to use MultiQC reports, see [https://multiqc.info](https://multiqc.info).
+The pipeline has special steps which allow the software versions used to be
+reported in the MultiQC output for future traceability.
 
 **Output files:**
 
@@ -58,6 +207,9 @@ For more information about how to use MultiQC reports, see [https://multiqc.info
 **Output files:**
 
 * `pipeline_info/`
-  * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
-  * Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.csv`.
-  * Documentation for interpretation of results in HTML format: `results_description.html`.
+  * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`,
+  `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`.
+  * Reports generated by the pipeline: `pipeline_report.html`,
+  `pipeline_report.txt` and `software_versions.csv`.
+  * Documentation for interpretation of results in HTML format:
+  `results_description.html`.
diff --git a/docs/usage.md b/docs/usage.md
index 76803ddcd3e60bf2dc060ced21e610abf5cff261..31eabe9fb8044a14de7319c5824c18a53b06936e 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -6,8 +6,6 @@
 
 ## Introduction
 
-<!-- TODO nf-core: Add documentation about anything specific to running your pipeline. For general topics, please point to (and add to) the main nf-core website. -->
-
 ## Running the pipeline
 
 The typical command for running the pipeline is as follows:
@@ -16,7 +14,8 @@ The typical command for running the pipeline is as follows:
 nextflow run nf-core/hic --input '*_R{1,2}.fastq.gz' -profile docker
 ```
 
-This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles.
+This will launch the pipeline with the `docker` configuration profile.
+See below for more information about profiles.
 
 Note that the pipeline will create the following files in your working directory:
 
@@ -29,7 +28,12 @@ results         # Finished results (configurable, see below)
 
 ### Updating the pipeline
 
-When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline:
+When you run the above command, Nextflow automatically pulls the pipeline code
+from GitHub and stores it as a cached version. When running the pipeline after
+this, it will always use the cached version if available - even if the pipeline
+has been updated since. To make sure that you're running the latest version of
+the pipeline, make sure that you regularly update the cached version of the
+pipeline:
 
 ```bash
 nextflow pull nf-core/hic
@@ -37,30 +41,60 @@ nextflow pull nf-core/hic
 
 ### Reproducibility
 
-It's a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since.
+It's a good idea to specify a pipeline version when running the pipeline on
+your data. This ensures that a specific version of the pipeline code and
+software are used when you run your pipeline. If you keep using the same tag,
+you'll be running the same version of the pipeline, even if there have been
+changes to the code since.
+
+First, go to the
+[nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find
+the latest version number - numeric only (eg. `1.3.1`).
+Then specify this when running the pipeline with `-r` (one hyphen)
+eg. `-r 1.3.1`.
+
+This version number will be logged in reports when you run the pipeline, so
+that you'll know what you used when you look back in the future.
 
-First, go to the [nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`.
+### Automatic resubmission
 
-This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future.
+Each step in the pipeline has a default set of requirements for number of CPUs,
+memory and time. For most of the steps in the pipeline, if the job exits with
+an error code of `143` (exceeded requested resources) it will automatically
+resubmit with higher requests (2 x original, then 3 x original). If it still
+fails after three times then the pipeline is stopped.
 
 ## Core Nextflow arguments
 
-> **NB:** These options are part of Nextflow and use a _single_ hyphen (pipeline parameters use a double-hyphen).
+> **NB:** These options are part of Nextflow and use a _single_ hyphen
+(pipeline parameters use a double-hyphen).
 
 ### `-profile`
 
-Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments.
+Use this parameter to choose a configuration profile. Profiles can give
+configuration presets for different compute environments.
 
 Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Conda) - see below.
 
-> We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported.
+> We highly recommend the use of Docker or Singularity containers for full
+pipeline reproducibility, however when this is not possible, Conda is also supported.
 
-The pipeline also dynamically loads configurations from [https://github.com/nf-core/configs](https://github.com/nf-core/configs) when it runs, making multiple config profiles for various institutional clusters available at run time. For more information and to see if your system is available in these configs please see the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
+The pipeline also dynamically loads configurations from
+[https://github.com/nf-core/configs](https://github.com/nf-core/configs)
+when it runs, making multiple config profiles for various institutional
+clusters available at run time.
+For more information and to see if your system is available in these
+configs please see
+the [nf-core/configs documentation](https://github.com/nf-core/configs#documentation).
 
-Note that multiple profiles can be loaded, for example: `-profile test,docker` - the order of arguments is important!
-They are loaded in sequence, so later profiles can overwrite earlier profiles.
+Note that multiple profiles can be loaded, for example: `-profile test,docker` -
+the order of arguments is important!
+They are loaded in sequence, so later profiles can overwrite
+earlier profiles.
 
-If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended.
+If `-profile` is not specified, the pipeline will run locally and
+expect all software to be
+installed and available on the `PATH`. This is _not_ recommended.
 
 * `docker`
   * A generic configuration profile to be used with [Docker](https://docker.com/)
@@ -81,19 +121,30 @@ If `-profile` is not specified, the pipeline will run locally and expect all sof
 
 ### `-resume`
 
-Specify this when restarting a pipeline. Nextflow will used cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously.
-
-You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names.
+Specify this when restarting a pipeline. Nextflow will use cached results from
+any pipeline steps where the inputs are the same, continuing from where it got
+to previously.
+You can also supply a run name to resume a specific run: `-resume [run-name]`.
+Use the `nextflow log` command to show previous run names.
 
 ### `-c`
 
-Specify the path to a specific config file (this is a core Nextflow command). See the [nf-core website documentation](https://nf-co.re/usage/configuration) for more information.
+Specify the path to a specific config file (this is a core Nextflow command).
+See the [nf-core website documentation](https://nf-co.re/usage/configuration)
+for more information.
 
 #### Custom resource requests
 
-Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped.
+Each step in the pipeline has a default set of requirements for number of CPUs,
+memory and time. For most of the steps in the pipeline, if the job exits with
+an error code of `143` (exceeded requested resources) it will automatically resubmit
+with higher requests (2 x original, then 3 x original). If it still fails after three
+times then the pipeline is stopped.
 
-Whilst these default requirements will hopefully work for most people with most data, you may find that you want to customise the compute resources that the pipeline requests. You can do this by creating a custom config file. For example, to give the workflow process `star` 32GB of memory, you could use the following config:
+Whilst these default requirements will hopefully work for most people with most data,
+you may find that you want to customise the compute resources that the pipeline requests.
+You can do this by creating a custom config file. For example, to give the workflow
+process `star` 32GB of memory, you could use the following config:
 
 ```nextflow
 process {
@@ -103,26 +154,440 @@ process {
 }
 ```
 
-See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for more information.
+See the main [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html)
+for more information.
 
 If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition above). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
 
-If you have any questions or issues please send us a message on [Slack](https://nf-co.re/join/slack) on the [`#configs` channel](https://nfcore.slack.com/channels/configs).
+If you have any questions or issues please send us a message on
+[Slack](https://nf-co.re/join/slack) on the
+[`#configs` channel](https://nfcore.slack.com/channels/configs).
 
 ### Running in the background
 
-Nextflow handles job submissions and supervises the running jobs. The Nextflow process must run until the pipeline is finished.
+Nextflow handles job submissions and supervises the running jobs.
+The Nextflow process must run until the pipeline is finished.
 
-The Nextflow `-bg` flag launches Nextflow in the background, detached from your terminal so that the workflow does not stop if you log out of your session. The logs are saved to a file.
+The Nextflow `-bg` flag launches Nextflow in the background, detached from your terminal
+so that the workflow does not stop if you log out of your session. The logs are
+saved to a file.
 
-Alternatively, you can use `screen` / `tmux` or similar tool to create a detached session which you can log back into at a later time.
-Some HPC setups also allow you to run nextflow within a cluster job submitted your job scheduler (from where it submits more jobs).
+Alternatively, you can use `screen` / `tmux` or similar tool to create a detached
+session which you can log back into at a later time.
+Some HPC setups also allow you to run nextflow within a cluster job submitted to
+your job scheduler (from where it submits more jobs).
 
 #### Nextflow memory requirements
 
-In some cases, the Nextflow Java virtual machines can start to request a large amount of memory.
-We recommend adding the following line to your environment to limit this (typically in `~/.bashrc` or `~./bash_profile`):
+In some cases, the Nextflow Java virtual machines can start to request a
+large amount of memory.
+We recommend adding the following line to your environment to limit this
+(typically in `~/.bashrc` or `~./bash_profile`):
 
 ```bash
 NXF_OPTS='-Xms1g -Xmx4g'
 ```
+
+## Inputs
+
+### `--input`
+
+Use this to specify the location of your input FastQ files. For example:
+
+```bash
+--input 'path/to/data/sample_*_{1,2}.fastq'
+```
+
+Please note the following requirements:
+
+1. The path must be enclosed in quotes
+2. The path must have at least one `*` wildcard character
+3. When using the pipeline with paired end data, the path must use `{1,2}`
+notation to specify read pairs.
+
+If left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`
+
+By default, the pipeline expects paired-end data. If you have single-end data,
+you need to specify `--single_end` on the command line when you launch the pipeline.
+A normal glob pattern, enclosed in quotation marks, can then be used for `--input`.
+For example:
+
+```bash
+--single_end --input '*.fastq'
+```
+
+It is not possible to run a mixture of single-end and paired-end files in one run.
+
+## Reference genomes
+
+The pipeline config files come bundled with paths to the illumina iGenomes reference
+index files. If running with docker or AWS, the configuration is set up to use the
+[AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) resource.
+
+### `--genome` (using iGenomes)
+
+There are many different species supported in the iGenomes references. To run
+the pipeline, you must specify which to use with the `--genome` flag.
+
+You can find the keys to specify the genomes in the
+[iGenomes config file](../conf/igenomes.config).
+
+### `--fasta`
+
+If you prefer, you can specify the full path to your reference genome when you
+run the pipeline:
+
+```bash
+--fasta '[path to Fasta reference]'
+```
+
+### `--bwt2_index`
+
+The bowtie2 indexes are required to run the Hi-C pipeline. If the
+`--bwt2_index` is not specified, the pipeline will either use the igenome
+bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly
+(see `--fasta` option)
+
+```bash
+--bwt2_index '[path to bowtie2 index (with basename)]'
+```
+
+### `--chromosome_size`
+
+The Hi-C pipeline also requires a two-column text file with the
+chromosome name and its size (tab separated).
+If not specified, this file will be automatically created by the pipeline.
+In the latter case, the `--fasta` reference genome has to be specified.
+
+```bash
+   chr1    249250621
+   chr2    243199373
+   chr3    198022430
+   chr4    191154276
+   chr5    180915260
+   chr6    171115067
+   chr7    159138663
+   chr8    146364022
+   chr9    141213431
+   chr10   135534747
+   (...)
+```
+
+```bash
+--chromosome_size '[path to chromosome size file]'
+```
+
+### `--restriction_fragments`
+
+Finally, Hi-C experiments based on restriction enzyme digestion requires a BED
+file with coordinates of restriction fragments.
+
+```bash
+   chr1   0       16007   HIC_chr1_1    0   +
+   chr1   16007   24571   HIC_chr1_2    0   +
+   chr1   24571   27981   HIC_chr1_3    0   +
+   chr1   27981   30429   HIC_chr1_4    0   +
+   chr1   30429   32153   HIC_chr1_5    0   +
+   chr1   32153   32774   HIC_chr1_6    0   +
+   chr1   32774   37752   HIC_chr1_7    0   +
+   chr1   37752   38369   HIC_chr1_8    0   +
+   chr1   38369   38791   HIC_chr1_9    0   +
+   chr1   38791   39255   HIC_chr1_10   0   +
+   (...)
+```
+
+If not specified, this file will be automatically created by the pipeline.
+In this case, the `--fasta` reference genome will be used.
+Note that the `--restriction_site` parameter is mandatory to create this file.
+
+## Hi-C specific options
+
+The following options are defined in the `hicpro.config` file, and can be
+updated either using a custom configuration file (see `-c` option) or using
+command line parameter.
+
+### Reads mapping
+
+The reads mapping is currently based on the two-steps strategy implemented in
+the HiC-pro pipeline. The idea is to first align reads from end-to-end.
+Reads that do not align are then trimmed at the ligation site, and their 5'
+end is re-aligned to the reference genome.
+Note that the default options are quite stringent, and can be updated according
+to the reads quality or the reference genome.
+
+#### `--bwt2_opts_end2end`
+
+Bowtie2 alignment option for end-to-end mapping.
+Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
+
+```bash
+--bwt2_opts_end2end '[Options for bowtie2 step1 mapping on full reads]'
+```
+
+#### `--bwt2_opts_trimmed`
+
+Bowtie2 alignment option for trimmed reads mapping (step 2).
+Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
+
+```bash
+--bwt2_opts_trimmed '[Options for bowtie2 step2 mapping on trimmed reads]'
+```
+
+#### `--min_mapq`
+
+Minimum mapping quality. Reads with lower quality are discarded. Default: 10
+
+```bash
+--min_mapq '[Minimum quality value]'
+```
+
+### Digestion Hi-C
+
+#### `--restriction_site`
+
+Restriction motif(s) for Hi-C digestion protocol. The restriction motif(s)
+is(are) used to generate the list of restriction fragments.
+The precise cutting site of the restriction enzyme has to be specified using
+the '^' character. Default: 'A^AGCTT'
+Here are a few examples:
+
+* MboI: ^GATC
+* DpnII: ^GATC
+* BglII: A^GATCT
+* HindIII: A^AGCTT
+* ARIMA kit: ^GATC,G^ANTC
+
+Note that multiple restriction motifs can be provided (comma-separated) and
+that 'N' bases are supported.
+
+```bash
+--restriction_site '[Cutting motif]'
+```
+
+#### `--ligation_site`
+
+Ligation motif after reads ligation. This motif is used for reads trimming and
+depends on the fill in strategy.
+Note that multiple ligation sites can be specified (comma separated) and that
+'N' base is interpreted and replaced by 'A','C','G','T'.
+Default: 'AAGCTAGCTT'
+
+```bash
+--ligation_site '[Ligation motif]'
+```
+
+Example for the ARIMA kit: GATCGATC,GANTGATC,GANTANTC,GATCANTC
+
+#### `--min_restriction_fragment_size`
+
+Minimum size of restriction fragments to consider for the Hi-C processing.
+Default: ''
+
+```bash
+--min_restriction_fragment_size '[numeric]'
+```
+
+#### `--max_restriction_fragment_size`
+
+Maximum size of restriction fragments to consider for the Hi-C processing.
+Default: ''
+
+```bash
+--max_restriction_fragment_size '[numeric]'
+```
+
+#### `--min_insert_size`
+
+Minimum reads insert size. Shorter 3C products are discarded.
+Default: ''
+
+```bash
+--min_insert_size '[numeric]'
+```
+
+#### `--max_insert_size`
+
+Maximum reads insert size. Longer 3C products are discarded.
+Default: ''
+
+```bash
+--max_insert_size '[numeric]'
+```
+
+### DNAse Hi-C
+
+#### `--dnase`
+
+In DNAse Hi-C mode, all options related to digestion Hi-C
+(see previous section) are ignored.
+In this case, it is highly recommended to use the `--min_cis_dist` parameter
+to remove spurious ligation products.
+
+```bash
+--dnase
+```
+
+### Hi-C processing
+
+#### `--min_cis_dist`
+
+Filter short range contact below the specified distance.
+Mainly useful for DNase Hi-C. Default: ''
+
+```bash
+--min_cis_dist '[numeric]'
+```
+
+#### `--rm_singleton`
+
+If specified, singleton reads are discarded at the mapping step.
+
+```bash
+--rm_singleton
+```
+
+#### `--rm_dup`
+
+If specified, duplicates reads are discarded before building contact maps.
+
+```bash
+--rm_dup
+```
+
+#### `--rm_multi`
+
+If specified, reads that aligned multiple times on the genome are discarded.
+Note the default mapping options are based on random hit assignment, meaning
+that only one position is kept per read.
+
+```bash
+--rm_multi
+```
+
+## Genome-wide contact maps
+
+### `--bin_size`
+
+Resolution of contact maps to generate (space separated).
+Default:'1000000,500000'
+
+```bash
+--bin_size '[numeric]'
+```
+
+### `--ice_max_iter`
+
+Maximum number of iteration for ICE normalization.
+Default: 100
+
+```bash
+--ice_max_iter '[numeric]'
+```
+
+### `--ice_filter_low_count_perc`
+
+Define which percentage of bins with low counts should be forced to zero.
+Default: 0.02
+
+```bash
+--ice_filter_low_count_perc '[numeric]'
+```
+
+### `--ice_filter_high_count_perc`
+
+Define which percentage of bins with high counts should be discarded before
+normalization. Default: 0
+
+```bash
+--ice_filter_high_count_perc '[numeric]'
+```
+
+### `--ice_eps`
+
+The relative increment in the results before declaring convergence for ICE
+normalization. Default: 0.1
+
+```bash
+--ice_eps '[numeric]'
+```
+
+## Inputs/Outputs
+
+### `--split_fastq`
+
+By default, the nf-core Hi-C pipeline expects one read pairs per sample.
+However, for large Hi-C data processing single fastq files can be very
+time consuming.
+The `--split_fastq` option allows to automatically split input read pairs
+into chunks of reads of size `--fastq_chunks_size` (Default: 20000000).
+In this case, all chunks will be processed in parallel and merged before
+generating the contact maps, thus leading to a significant increase of processing performance.
+
+```bash
+--split_fastq --fastq_chunks_size '[numeric]'
+```
+
+### `--save_reference`
+
+If specified, annotation files automatically generated from the `--fasta` file
+are exported in the results folder. Default: false
+
+```bash
+--save_reference
+```
+
+### `--save_aligned_intermediates`
+
+If specified, all intermediate mapping files are saved and exported in the
+results folder. Default: false
+
+```bash
+--save_aligned_intermediates
+```
+
+### `--save_interaction_bam`
+
+If specified, write a BAM file with all classified reads (valid paires,
+dangling end, self-circle, etc.) and its tags.
+
+```bash
+--save_interaction_bam
+```
+
+## Skip options
+
+### `--skip_maps`
+
+If defined, the workflow stops with the list of valid interactions, and the
+genome-wide maps are not built. Useful for capture-C analysis. Default: false
+
+```bash
+--skip_maps
+```
+
+### `--skip_ice`
+
+If defined, the ICE normalization is not run on the raw contact maps.
+Default: false
+
+```bash
+--skip_ice
+```
+
+### `--skip_cool`
+
+If defined, cooler files are not generated. Default: false
+
+```bash
+--skip_cool
+```
+
+### `--skip_multiQC`
+
+If defined, the MultiQC report is not generated. Default: false
+
+```bash
+--skip_multiQC
+```
diff --git a/environment.yml b/environment.yml
index cde6a0c58bdaea5d3a9c55b167d26df11441799e..6ee111e3beddb0e3c014d31ceec9d4a64bff25e6 100644
--- a/environment.yml
+++ b/environment.yml
@@ -6,10 +6,25 @@ channels:
   - bioconda
   - defaults
 dependencies:
-  - conda-forge::python=3.7.3
-  - conda-forge::markdown=3.1.1
-  - conda-forge::pymdown-extensions=6.0
-  - conda-forge::pygments=2.5.2
-  # TODO nf-core: Add required software dependencies here
-  - bioconda::fastqc=0.11.8
-  - bioconda::multiqc=1.7
+  - conda-forge::python=3.7.6
+  - pip=20.0.1
+  - conda-forge::scipy=1.4.1
+  - conda-forge::numpy=1.18.1
+  - bioconda::iced=0.5.6
+  - bioconda::bx-python=0.8.8
+  - bioconda::pysam=0.15.4
+  - conda-forge::pymdown-extensions=7.1
+  - bioconda::cooler=0.8.6
+  - bioconda::bowtie2=2.3.5
+  - bioconda::samtools=1.9
+  - bioconda::multiqc=1.8
+
+## Dev tools
+  - bioconda::hicexplorer=3.4.3
+  - bioconda::bioconductor-hitc=1.32.0
+  - conda-forge::r-optparse=1.6.6
+  - bioconda::ucsc-bedgraphtobigwig=357
+  - conda-forge::cython=0.29.19
+  - pip:
+    - cooltools==0.3.2
+    - fanc==0.8.30
\ No newline at end of file
diff --git a/main.nf b/main.nf
index 40164c2b2dfb43af888037960402d62d5e549288..53976cae18ae30e055c3c75ae1a041269bfe7117 100644
--- a/main.nf
+++ b/main.nf
@@ -10,7 +10,7 @@
 */
 
 def helpMessage() {
-    // TODO nf-core: Add to this help message with new command line parameters
+    // Add to this help message with new command line parameters
     log.info nfcoreHeader()
     log.info"""
 
@@ -21,57 +21,93 @@ def helpMessage() {
     nextflow run nf-core/hic --input '*_R{1,2}.fastq.gz' -profile docker
 
     Mandatory arguments:
-      --input [file]                  Path to input data (must be surrounded with quotes)
-      -profile [str]                  Configuration profile to use. Can use multiple (comma separated)
-                                      Available: conda, docker, singularity, test, awsbatch, <institute> and more
+      --input [file]                            Path to input data (must be surrounded with quotes)
+      -profile [str]                            Configuration profile to use. Can use multiple (comma separated)
+                                                Available: conda, docker, singularity, awsbatch, test and more.
 
-    Options:
-      --genome [str]                  Name of iGenomes reference
-      --single_end [bool]             Specifies that the input is single-end reads
+    References                                  If not specified in the configuration file or you wish to overwrite any of the references.
+      --genome [str]                            Name of iGenomes reference
+      --bwt2_index [file]                       Path to Bowtie2 index
+      --fasta [file]                            Path to Fasta reference
+      --chromosome_size [file]                  Path to chromosome size file
+      --restriction_fragments [file]            Path to restriction fragment file (bed)
+      --save_reference [bool]                   Save reference genome to output folder. Default: False
 
-    References                        If not specified in the configuration file or you wish to overwrite any of the references
-      --fasta [file]                  Path to fasta reference
+    Alignments
+      --split_fastq [bool]                      Split fastq files in reads chunks to speed up computation. Default: false
+      --fastq_chunks_size [int]                 Size of read chunks if split_fastq is true. Default: 20000000
+      --save_aligned_intermediates [bool]       Save intermediates alignment files. Default: False
+      --bwt2_opts_end2end [str]                 Options for bowtie2 end-to-end mapping (first mapping step). See hic.config for default.
+      --bwt2_opts_trimmed [str]                 Options for bowtie2 mapping after ligation site trimming. See hic.config for default.
+      --min_mapq [int]                          Minimum mapping quality values to consider. Default: 10
+      --restriction_site [str]                  Cutting motif(s) of restriction enzyme(s) (comma separated). Default: 'A^AGCTT'
+      --ligation_site [str]                     Ligation motifs to trim (comma separated). Default: 'AAGCTAGCTT'
+      --rm_singleton [bool]                     Remove singleton reads. Default: true
+      --rm_multi [bool]                         Remove multi-mapped reads. Default: true
+      --rm_dup [bool]                           Remove duplicates. Default: true
+
+    Contacts calling
+      --min_restriction_fragment_size [int]     Minimum size of restriction fragments to consider. Default: 0
+      --max_restriction_fragment_size [int]     Maximum size of restriction fragments to consider. Default: 0
+      --min_insert_size [int]                   Minimum insert size of mapped reads to consider. Default: 0
+      --max_insert_size [int]                   Maximum insert size of mapped reads to consider. Default: 0
+      --save_interaction_bam [bool]             Save BAM file with interaction tags (dangling-end, self-circle, etc.). Default: False
+
+      --dnase [bool]                            Run DNase Hi-C mode. All options related to restriction fragments are not considered. Default: False
+      --min_cis_dist [int]                      Minimum intra-chromosomal distance to consider. Default: 0
+
+    Contact maps
+      --bin_size [int]                          Bin size for contact maps (comma separated). Default: '1000000,500000'
+      --ice_max_iter [int]                      Maximum number of iteration for ICE normalization. Default: 100
+      --ice_filter_low_count_perc [float]       Percentage of low counts columns/rows to filter before ICE normalization. Default: 0.02
+      --ice_filter_high_count_perc [float]      Percentage of high counts columns/rows to filter before ICE normalization. Default: 0
+      --ice_eps [float]                         Convergence criteria for ICE normalization. Default: 0.1
+
+
+    Workflow
+      --skip_maps [bool]                        Skip generation of contact maps. Useful for capture-C. Default: False
+      --skip_ice [bool]                         Skip ICE normalization. Default: False
+      --skip_cool [bool]                        Skip generation of cool files. Default: False
+      --skip_multiqc [bool]                     Skip MultiQC. Default: False
 
     Other options:
-      --outdir [file]                 The output directory where the results will be saved
-      --publish_dir_mode [str]        Mode for publishing results in the output directory. Available: symlink, rellink, link, copy, copyNoFollow, move (Default: copy)
-      --email [email]                 Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits
-      --email_on_fail [email]         Same as --email, except only send mail if the workflow is not successful
-      --max_multiqc_email_size [str]  Threshold size for MultiQC report to be attached in notification email. If file generated by pipeline exceeds the threshold, it will not be attached (Default: 25MB)
-      -name [str]                     Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic
+      --outdir [file]                           The output directory where the results will be saved
+      --publish_dir_mode [str]                  Mode for publishing results in the output directory. Available: symlink, rellink, link, copy, copyNoFollow, move (Default: copy)
+      --email [email]                           Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. Default: None
+      --email_on_fail [email]                   Same as --email, except only send mail if the workflow is not successful
+      --max_multiqc_email_size [str]            Threshold size for MultiQC report to be attached in notification email. If file generated by pipeline exceeds the threshold, it will not be attached (Default: 25MB)
+      -name [str]                               Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. Default: None
 
     AWSBatch options:
-      --awsqueue [str]                The AWSBatch JobQueue that needs to be set when running on AWSBatch
-      --awsregion [str]               The AWS Region for your AWS Batch job to run on
-      --awscli [str]                  Path to the AWS CLI tool
+      --awsqueue [str]                          The AWSBatch JobQueue that needs to be set when running on AWSBatch
+      --awsregion [str]                         The AWS Region for your AWS Batch job to run on
+      --awscli [str]                            Path to the AWS CLI tool
     """.stripIndent()
 }
 
+/**********************************************************
+ * SET UP CONFIGURATION VARIABLES
+ */
+
 // Show help message
-if (params.help) {
+if (params.help){
     helpMessage()
     exit 0
 }
 
-/*
- * SET UP CONFIGURATION VARIABLES
- */
-
 // Check if genome exists in the config file
 if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) {
     exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(", ")}"
 }
 
-// TODO nf-core: Add any reference files that are needed
-// Configurable reference genomes
-//
-// NOTE - THIS IS NOT USED IN THIS PIPELINE, EXAMPLE ONLY
-// If you want to use the channel below in a process, define the following:
-//   input:
-//   file fasta from ch_fasta
-//
+// Check Digestion or DNase Hi-C mode
+if (!params.dnase && !params.ligation_site) {
+   exit 1, "Ligation motif not found. For DNase Hi-C, please use '--dnase' option"
+}
+
+// Reference index path configuration
+params.bwt2_index = params.genome ? params.genomes[ params.genome ].bowtie2 ?: false : false
 params.fasta = params.genome ? params.genomes[ params.genome ].fasta ?: false : false
-if (params.fasta) { ch_fasta = file(params.fasta, checkIfExists: true) }
 
 // Has the run name been specified by the user?
 // this has the bonus effect of catching both -name and --name
@@ -97,39 +133,128 @@ ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multi
 ch_output_docs = file("$projectDir/docs/output.md", checkIfExists: true)
 ch_output_docs_images = file("$projectDir/docs/images/", checkIfExists: true)
 
+/**********************************************************
+ * SET UP CHANNELS
+ */
+
 /*
- * Create a channel for input read files
+ * input read files
  */
-if (params.input_paths) {
-    if (params.single_end) {
-        Channel
-            .from(params.input_paths)
-            .map { row -> [ row[0], [ file(row[1][0], checkIfExists: true) ] ] }
-            .ifEmpty { exit 1, "params.input_paths was empty - no input files supplied" }
-            .into { ch_read_files_fastqc; ch_read_files_trimming }
-    } else {
-        Channel
-            .from(params.input_paths)
-            .map { row -> [ row[0], [ file(row[1][0], checkIfExists: true), file(row[1][1], checkIfExists: true) ] ] }
-            .ifEmpty { exit 1, "params.input_paths was empty - no input files supplied" }
-            .into { ch_read_files_fastqc; ch_read_files_trimming }
-    }
-} else {
-    Channel
-        .fromFilePairs(params.input, size: params.single_end ? 1 : 2)
-        .ifEmpty { exit 1, "Cannot find any reads matching: ${params.input}\nNB: Path needs to be enclosed in quotes!\nIf this is single-end data, please specify --single_end on the command line." }
-        .into { ch_read_files_fastqc; ch_read_files_trimming }
+if (params.input_paths){
+
+   raw_reads = Channel.create()
+   raw_reads_2 = Channel.create()
+
+   Channel
+      .from( params.input_paths )
+      .map { row -> [ row[0], [file(row[1][0]), file(row[1][1])]] }
+      .separate( raw_reads, raw_reads_2 ) { a -> [tuple(a[0], a[1][0]), tuple(a[0], a[1][1])] }
+ }else{
+
+   raw_reads = Channel.create()
+   raw_reads_2 = Channel.create()
+
+   Channel
+      .fromFilePairs( params.input )
+      .separate( raw_reads, raw_reads_2 ) { a -> [tuple(a[0], a[1][0]), tuple(a[0], a[1][1])] }
+}
+
+// Split fastq files
+// https://www.nextflow.io/docs/latest/operator.html#splitfastq
+
+if ( params.split_fastq ){
+   raw_reads_full = raw_reads.concat( raw_reads_2 )
+   raw_reads = raw_reads_full.splitFastq( by: params.fastq_chunks_size, file: true)
+ }else{
+   raw_reads = raw_reads.concat( raw_reads_2 ).dump(tag: "data")
+}
+
+
+/*
+ * Other input channels
+ */
+
+// Reference genome
+if ( params.bwt2_index ){
+   lastPath = params.bwt2_index.lastIndexOf(File.separator)
+   bwt2_dir =  params.bwt2_index.substring(0,lastPath+1)
+   bwt2_base = params.bwt2_index.substring(lastPath+1)
+
+   Channel.fromPath( bwt2_dir , checkIfExists: true)
+      .ifEmpty { exit 1, "Genome index: Provided index not found: ${params.bwt2_index}" }
+      .into { bwt2_index_end2end; bwt2_index_trim }
+
+}
+else if ( params.fasta ) {
+   lastPath = params.fasta.lastIndexOf(File.separator)
+   fasta_base = params.fasta.substring(lastPath+1)
+   bwt2_base = fasta_base.toString() - ~/(\.fa)?(\.fasta)?(\.fas)?(\.fsa)?$/
+
+   Channel.fromPath( params.fasta )
+	.ifEmpty { exit 1, "Genome index: Fasta file not found: ${params.fasta}" }
+        .set { fasta_for_index }
+}
+else {
+   exit 1, "No reference genome specified!"
+}
+
+// Chromosome size
+if ( params.chromosome_size ){
+   Channel.fromPath( params.chromosome_size , checkIfExists: true)
+         .into {chromosome_size; chromosome_size_cool}
+}
+else if ( params.fasta ){
+   Channel.fromPath( params.fasta )
+	.ifEmpty { exit 1, "Chromosome sizes: Fasta file not found: ${params.fasta}" }
+       	.set { fasta_for_chromsize }
+}
+else {
+   exit 1, "No chromosome size specified!"
+}
+
+// Restriction fragments
+if ( params.restriction_fragments ){
+   Channel.fromPath( params.restriction_fragments, checkIfExists: true )
+      .set {res_frag_file}
+}
+else if ( params.fasta && params.restriction_site ){
+   Channel.fromPath( params.fasta )
+           .ifEmpty { exit 1, "Restriction fragments: Fasta file not found: ${params.fasta}" }
+           .set { fasta_for_resfrag }
+}
+else {
+    exit 1, "No restriction fragments file specified!"
 }
 
+// Resolutions for contact maps
+map_res = Channel.from( params.bin_size.tokenize(',') )
+
+/**********************************************************
+ * SET UP LOGS
+ */
+
 // Header log info
 log.info nfcoreHeader()
 def summary = [:]
-if (workflow.revision) summary['Pipeline Release'] = workflow.revision
+if(workflow.revision) summary['Pipeline Release'] = workflow.revision
 summary['Run Name']         = custom_runName ?: workflow.runName
-// TODO nf-core: Report custom parameters here
 summary['Input']            = params.input
+summary['splitFastq']       = params.split_fastq
+if (params.split_fastq)
+   summary['Read chunks Size'] = params.fastq_chunks_size
 summary['Fasta Ref']        = params.fasta
-summary['Data Type']        = params.single_end ? 'Single-End' : 'Paired-End'
+summary['Restriction Motif']= params.restriction_site
+summary['Ligation Motif']   = params.ligation_site
+summary['DNase Mode']       = params.dnase
+summary['Remove Dup']       = params.rm_dup
+summary['Remove MultiHits'] = params.rm_multi
+summary['Min MAPQ']         = params.min_mapq
+summary['Min Fragment Size']= params.min_restriction_fragment_size
+summary['Max Fragment Size']= params.max_restriction_fragment_size
+summary['Min Insert Size']  = params.min_insert_size
+summary['Max Insert Size']  = params.max_insert_size
+summary['Min CIS dist']     = params.min_cis_dist
+summary['Maps resolution']  = params.bin_size
 summary['Max Resources']    = "$params.max_memory memory, $params.max_cpus cpus, $params.max_time time per job"
 if (workflow.containerEngine) summary['Container'] = "$workflow.containerEngine - $workflow.container"
 summary['Output dir']       = params.outdir
@@ -177,6 +302,7 @@ Channel.from(summary.collect{ [it.key, it.value] })
 /*
  * Parse software version numbers
  */
+
 process get_software_versions {
     publishDir "${params.outdir}/pipeline_info", mode: params.publish_dir_mode,
         saveAs: { filename ->
@@ -184,75 +310,563 @@ process get_software_versions {
                       else null
                 }
 
-    output:
-    file 'software_versions_mqc.yaml' into ch_software_versions_yaml
-    file "software_versions.csv"
+   output:
+   file 'software_versions_mqc.yaml' into software_versions_yaml
+   file "software_versions.csv"
+
+   script:
+   """
+   echo $workflow.manifest.version > v_pipeline.txt
+   echo $workflow.nextflow.version > v_nextflow.txt
+   bowtie2 --version > v_bowtie2.txt
+   python --version > v_python.txt 2>&1
+   samtools --version > v_samtools.txt
+   multiqc --version > v_multiqc.txt
+   scrape_software_versions.py &> software_versions_mqc.yaml
+   """
+}
+
+def create_workflow_summary(summary) {
+
+    def yaml_file = workDir.resolve('workflow_summary_mqc.yaml')
+    yaml_file.text  = """
+    id: 'nf-core-hic-summary'
+    description: " - this information is collected when the pipeline is started."
+    section_name: 'nf-core/hic Workflow Summary'
+    section_href: 'https://github.com/nf-core/hic'
+    plot_type: 'html'
+    data: |
+        <dl class=\"dl-horizontal\">
+${summary.collect { k,v -> "            <dt>$k</dt><dd><samp>${v ?: '<span style=\"color:#999999;\">N/A</a>'}</samp></dd>" }.join("\n")}
+        </dl>
+    """.stripIndent()
 
-    script:
-    // TODO nf-core: Get all tools to print their version number here
-    """
-    echo $workflow.manifest.version > v_pipeline.txt
-    echo $workflow.nextflow.version > v_nextflow.txt
-    fastqc --version > v_fastqc.txt
-    multiqc --version > v_multiqc.txt
-    scrape_software_versions.py &> software_versions_mqc.yaml
-    """
+   return yaml_file
 }
 
+
+
+/****************************************************
+ * PRE-PROCESSING
+ */
+
+if(!params.bwt2_index && params.fasta){
+    process makeBowtie2Index {
+        tag "$bwt2_base"
+        label 'process_highmem'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: params.publish_dir_mode
+
+        input:
+        file fasta from fasta_for_index
+
+        output:
+        file "bowtie2_index" into bwt2_index_end2end
+	file "bowtie2_index" into bwt2_index_trim
+
+        script:
+        """
+        mkdir bowtie2_index
+	bowtie2-build ${fasta} bowtie2_index/${bwt2_base}
+	"""
+      }
+ }
+
+
+if(!params.chromosome_size && params.fasta){
+    process makeChromSize {
+        tag "$fasta"
+	label 'process_low'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: params.publish_dir_mode
+
+        input:
+        file fasta from fasta_for_chromsize
+
+        output:
+        file "*.size" into chromosome_size, chromosome_size_cool
+
+        script:
+        """
+	samtools faidx ${fasta}
+	cut -f1,2 ${fasta}.fai > chrom.size
+   	"""
+      }
+ }
+
+if(!params.restriction_fragments && params.fasta && !params.dnase){
+    process getRestrictionFragments {
+        tag "$fasta ${params.restriction_site}"
+	label 'process_low'
+        publishDir path: { params.save_reference ? "${params.outdir}/reference_genome" : params.outdir },
+                   saveAs: { params.save_reference ? it : null }, mode: params.publish_dir_mode
+
+        input:
+        file fasta from fasta_for_resfrag
+
+        output:
+        file "*.bed" into res_frag_file
+
+        script:
+        """
+	digest_genome.py -r ${params.restriction_site} -o restriction_fragments.bed ${fasta}
+	"""
+      }
+ }
+
+/****************************************************
+ * MAIN WORKFLOW
+ */
+
 /*
- * STEP 1 - FastQC
+ * STEP 1 - Two-steps Reads Mapping
+*/
+
+process bowtie2_end_to_end {
+   tag "$prefix"
+   label 'process_medium'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+
+   input:
+   set val(sample), file(reads) from raw_reads
+   file index from bwt2_index_end2end.collect()
+
+   output:
+   set val(prefix), file("${prefix}_unmap.fastq") into unmapped_end_to_end
+   set val(prefix), file("${prefix}.bam") into end_to_end_bam
+
+   script:
+   prefix = reads.toString() - ~/(\.fq)?(\.fastq)?(\.gz)?$/
+   def bwt2_opts = params.bwt2_opts_end2end
+
+   if (!params.dnase){
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+	${bwt2_opts} \\
+	-p ${task.cpus} \\
+	-x ${index}/${bwt2_base} \\
+	--un ${prefix}_unmap.fastq \\
+ 	-U ${reads} | samtools view -F 4 -bS - > ${prefix}.bam
+   """
+   }else{
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+	${bwt2_opts} \\
+	-p ${task.cpus} \\
+	-x ${index}/${bwt2_base} \\
+	--un ${prefix}_unmap.fastq \\
+ 	-U ${reads} > ${prefix}.bam
+   """
+   }
+}
+
+process trim_reads {
+   tag "$prefix"
+   label 'process_low'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+
+   when:
+   !params.dnase
+
+   input:
+   set val(prefix), file(reads) from unmapped_end_to_end
+
+   output:
+   set val(prefix), file("${prefix}_trimmed.fastq") into trimmed_reads
+
+   script:
+   """
+   cutsite_trimming --fastq $reads \\
+                    --cutsite  ${params.ligation_site} \\
+                    --out ${prefix}_trimmed.fastq
+   """
+}
+
+process bowtie2_on_trimmed_reads {
+   tag "$prefix"
+   label 'process_medium'
+   publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+
+   when:
+   !params.dnase
+
+   input:
+   set val(prefix), file(reads) from trimmed_reads
+   file index from bwt2_index_trim.collect()
+
+   output:
+   set val(prefix), file("${prefix}_trimmed.bam") into trimmed_bam
+
+   script:
+   prefix = reads.toString() - ~/(_trimmed)?(\.fq)?(\.fastq)?(\.gz)?$/
+   """
+   bowtie2 --rg-id BMG --rg SM:${prefix} \\
+           ${params.bwt2_opts_trimmed} \\
+           -p ${task.cpus} \\
+           -x ${index}/${bwt2_base} \\
+           -U ${reads} | samtools view -bS - > ${prefix}_trimmed.bam
+   """
+}
+
+if (!params.dnase){
+   process merge_mapping_steps{
+      tag "$sample = $bam1 + $bam2"
+      label 'process_medium'
+      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+
+      input:
+      set val(prefix), file(bam1), file(bam2) from end_to_end_bam.join( trimmed_bam )
+
+      output:
+      set val(sample), file("${prefix}_bwt2merged.bam") into bwt2_merged_bam
+      set val(oname), file("${prefix}.mapstat") into all_mapstat
+
+      script:
+      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1$|_2)/
+      tag = prefix.toString() =~/_R1|_val_1|_1/ ? "R1" : "R2"
+      oname = prefix.toString() - ~/(\.[0-9]+)$/
+      """
+      samtools merge -@ ${task.cpus} \\
+    	             -f ${prefix}_bwt2merged.bam \\
+                     ${bam1} ${bam2}
+
+      samtools sort -@ ${task.cpus} -m 800M \\
+      	            -n -T /tmp/ \\
+	            -o ${prefix}_bwt2merged.sorted.bam \\
+	            ${prefix}_bwt2merged.bam
+
+      mv ${prefix}_bwt2merged.sorted.bam ${prefix}_bwt2merged.bam
+
+      echo "## ${prefix}" > ${prefix}.mapstat
+      echo -n "total_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+      echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${prefix}_bwt2merged.bam >> ${prefix}.mapstat
+      echo -n "global_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "local_${tag}\t"  >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam2} >> ${prefix}.mapstat
+      """
+   }
+}else{
+   process dnase_mapping_stats{
+      tag "$sample = $bam1"
+      label 'process_medium'
+      publishDir path: { params.save_aligned_intermediates ? "${params.outdir}/mapping" : params.outdir },
+   	      saveAs: { params.save_aligned_intermediates ? it : null }, mode: params.publish_dir_mode
+
+      input:
+      set val(prefix), file(bam1) from end_to_end_bam
+
+      output:
+      set val(sample), file(bam1) into bwt2_merged_bam
+      set val(oname), file("${prefix}.mapstat") into all_mapstat
+
+      script:
+      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
+      tag = prefix.toString() =~/_R1|_val_1|_1/ ? "R1" : "R2"
+      oname = prefix.toString() - ~/(\.[0-9]+)$/
+      """
+      echo "## ${prefix}" > ${prefix}.mapstat
+      echo -n "total_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c ${bam1} >> ${prefix}.mapstat
+      echo -n "mapped_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "global_${tag}\t" >> ${prefix}.mapstat
+      samtools view -c -F 4 ${bam1} >> ${prefix}.mapstat
+      echo -n "local_${tag}\t0"  >> ${prefix}.mapstat
+      """
+   }
+}
+
+process combine_mapped_files{
+   tag "$sample = $r1_prefix + $r2_prefix"
+   label 'process_low'
+   publishDir "${params.outdir}/mapping", mode: params.publish_dir_mode,
+   	      saveAs: {filename -> filename.indexOf(".pairstat") > 0 ? "stats/$filename" : "$filename"}
+
+   input:
+   set val(sample), file(aligned_bam) from bwt2_merged_bam.groupTuple()
+
+   output:
+   set val(sample), file("${sample}_bwt2pairs.bam") into paired_bam
+   set val(oname), file("*.pairstat") into all_pairstat
+
+   script:
+   r1_bam = aligned_bam[0]
+   r1_prefix = r1_bam.toString() - ~/_bwt2merged.bam$/
+   r2_bam = aligned_bam[1]
+   r2_prefix = r2_bam.toString() - ~/_bwt2merged.bam$/
+   oname = sample.toString() - ~/(\.[0-9]+)$/
+
+   def opts = "-t"
+   opts = params.rm_singleton ? "${opts}" : "--single ${opts}"
+   opts = params.rm_multi ? "${opts}" : "--multi ${opts}"
+   if ("$params.min_mapq".isInteger()) opts="${opts} -q ${params.min_mapq}"
+   """
+   mergeSAM.py -f ${r1_bam} -r ${r2_bam} -o ${sample}_bwt2pairs.bam ${opts}
+   """
+}
+
+
+/*
+ * STEP2 - DETECT VALID PAIRS
+*/
+
+if (!params.dnase){
+   process get_valid_interaction{
+      tag "$sample"
+      label 'process_low'
+      publishDir "${params.outdir}/hic_results/data", mode: params.publish_dir_mode,
+   	      saveAs: {filename -> filename.indexOf("stat") > 0 ? "stats/$filename" : "$filename"}
+
+      input:
+      set val(sample), file(pe_bam) from paired_bam
+      file frag_file from res_frag_file.collect()
+
+      output:
+      set val(sample), file("*.validPairs") into valid_pairs
+      set val(sample), file("*.validPairs") into valid_pairs_4cool
+      set val(sample), file("*.DEPairs") into de_pairs
+      set val(sample), file("*.SCPairs") into sc_pairs
+      set val(sample), file("*.REPairs") into re_pairs
+      set val(sample), file("*.FiltPairs") into filt_pairs
+      set val(sample), file("*RSstat") into all_rsstat
+
+      script:
+      if (params.split_fastq){
+         sample = sample.toString() - ~/(\.[0-9]+)$/
+      }
+
+      def opts = ""
+      opts += params.min_cis_dist > 0 ? " -d ${params.min_cis_dist}" : ''
+      opts += params.min_insert_size > 0 ?  " -s ${params.min_insert_size}" : ''
+      opts += params.max_insert_size > 0 ? " -l ${params.max_insert_size}" : ''
+      opts += params.min_restriction_fragment_size > 0 ? " -t ${params.min_restriction_fragment_size}" : ''
+      opts += params.max_restriction_fragment_size > 0 ? " -m ${params.max_restriction_fragment_size}" : ''
+      opts += params.save_interaction_bam ? " --sam" : ''
+      prefix = pe_bam.toString() - ~/.bam/
+      """
+      mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} --all ${opts}
+      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      """
+   }
+}
+else{
+   process get_valid_interaction_dnase{
+      tag "$sample"
+      label 'process_low'
+      publishDir "${params.outdir}/hic_results/data", mode: params.publish_dir_mode,
+   	      saveAs: {filename -> filename.indexOf("stat") > 0 ? "stats/$filename" : "$filename"}
+
+      input:
+      set val(sample), file(pe_bam) from paired_bam
+
+      output:
+      set val(sample), file("*.validPairs") into valid_pairs
+      set val(sample), file("*.validPairs") into valid_pairs_4cool
+      set val(sample), file("*RSstat") into all_rsstat
+
+      script:
+      if (params.split_fastq){
+         sample = sample.toString() - ~/(\.[0-9]+)$/
+      }
+
+      opts = params.min_cis_dist > 0 ? " -d ${params.min_cis_dist}" : ''
+      prefix = pe_bam.toString() - ~/.bam/
+      """
+      mapped_2hic_dnase.py -r ${pe_bam} ${opts}
+      sort -T /tmp/ -k2,2V -k3,3n -k5,5V -k6,6n -o ${prefix}.validPairs ${prefix}.validPairs
+      """
+   }
+}
+
+
+/*
+ * STEP3 - BUILD MATRIX
+*/
+
+process remove_duplicates {
+   tag "$sample"
+   label 'process_highmem'
+   publishDir "${params.outdir}/hic_results/data", mode: params.publish_dir_mode,
+   	      saveAs: {filename -> filename.indexOf("stat") > 0 ? "stats/$sample/$filename" : "$filename"}
+
+   input:
+   set val(sample), file(vpairs) from valid_pairs.groupTuple()
+
+   output:
+   set val(sample), file("*.allValidPairs") into all_valid_pairs
+   set val(sample), file("*.allValidPairs") into all_valid_pairs_4cool
+   file("stats/") into all_mergestat
+
+   script:
+   if ( params.rm_dup ){
+   """
+   mkdir -p stats/${sample}
+
+   ## Sort valid pairs and remove read pairs with same starts (i.e duplicated read pairs)
+   sort -T /tmp/ -S 50% -k2,2V -k3,3n -k5,5V -k6,6n -m ${vpairs} | \
+   awk -F"\\t" 'BEGIN{c1=0;c2=0;s1=0;s2=0}(c1!=\$2 || c2!=\$5 || s1!=\$3 || s2!=\$6){print;c1=\$2;c2=\$5;s1=\$3;s2=\$6}' > ${sample}.allValidPairs
+
+   echo -n "valid_interaction\t" > stats/${sample}/${sample}_allValidPairs.mergestat
+   cat ${vpairs} | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction_rmdup\t" >> stats/${sample}/${sample}_allValidPairs.mergestat
+   cat ${sample}.allValidPairs | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+
+   ## Count short range (<20000) vs long range contacts
+   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> stats/${sample}/${sample}_allValidPairs.mergestat
+
+   """
+   }else{
+   """
+   mkdir -p stats/${sample}
+   cat ${vpairs} > ${sample}.allValidPairs
+   echo -n "valid_interaction\t" > stats/${sample}/${sample}_allValidPairs.mergestat
+   cat ${vpairs} | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+   echo -n "valid_interaction_rmdup\t" >> stats/${sample}/${sample}_allValidPairs.mergestat
+   cat ${sample}.allValidPairs | wc -l >> stats/${sample}/${sample}_allValidPairs.mergestat
+
+   ## Count short range (<20000) vs long range contacts
+   awk 'BEGIN{cis=0;trans=0;sr=0;lr=0} \$2 == \$5{cis=cis+1; d=\$6>\$3?\$6-\$3:\$3-\$6; if (d<=20000){sr=sr+1}else{lr=lr+1}} \$2!=\$5{trans=trans+1}END{print "trans_interaction\\t"trans"\\ncis_interaction\\t"cis"\\ncis_shortRange\\t"sr"\\ncis_longRange\\t"lr}' ${sample}.allValidPairs >> stats/${sample}/${sample}_allValidPairs.mergestat
+   """
+   }
+}
+
+process merge_sample {
+   tag "$ext"
+   label 'process_low'
+   publishDir "${params.outdir}/hic_results/stats/${sample}", mode: params.publish_dir_mode
+
+   input:
+   set val(prefix), file(fstat) from all_mapstat.groupTuple().concat(all_pairstat.groupTuple(), all_rsstat.groupTuple())
+
+   output:
+   file("mstats/") into all_mstats
+
+  script:
+  sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2|_1|_2)/
+  if ( (fstat =~ /.mapstat/) ){ ext = "mmapstat" }
+  if ( (fstat =~ /.pairstat/) ){ ext = "mpairstat" }
+  if ( (fstat =~ /.RSstat/) ){ ext = "mRSstat" }
+  """
+  mkdir -p mstats/${sample}
+  merge_statfiles.py -f ${fstat} > mstats/${sample}/${prefix}.${ext}
+  """
+}
+
+process build_contact_maps{
+   tag "$sample - $mres"
+   label 'process_highmem'
+   publishDir "${params.outdir}/hic_results/matrix/raw", mode: params.publish_dir_mode
+
+   when:
+   !params.skip_maps
+
+   input:
+   set val(sample), file(vpairs), val(mres) from all_valid_pairs.combine(map_res)
+   file chrsize from chromosome_size.collect()
+
+   output:
+   file("*.matrix") into raw_maps
+   file "*.bed"
+
+   script:
+   """
+   build_matrix --matrix-format upper  --binsize ${mres} --chrsizes ${chrsize} --ifile ${vpairs} --oprefix ${sample}_${mres}
+   """
+}
+
+/*
+ * STEP 4 - NORMALIZE MATRIX
+*/
+
+process run_ice{
+   tag "$rmaps"
+   label 'process_highmem'
+   publishDir "${params.outdir}/hic_results/matrix/iced", mode: params.publish_dir_mode
+
+   when:
+   !params.skip_maps && !params.skip_ice
+
+   input:
+   file(rmaps) from raw_maps
+   file "*.biases"
+
+   output:
+   file("*iced.matrix") into iced_maps
+
+   script:
+   prefix = rmaps.toString() - ~/(\.matrix)?$/
+   """
+   ice --filter_low_counts_perc ${params.ice_filer_low_count_perc} \
+   --results_filename ${prefix}_iced.matrix \
+   --filter_high_counts_perc ${params.ice_filer_high_count_perc} \
+   --max_iter ${params.ice_max_iter} --eps ${params.ice_eps} --remove-all-zeros-loci --output-bias 1 --verbose 1 ${rmaps}
+   """
+}
+
+
+/*
+ * STEP 5 - COOLER FILE
  */
-process fastqc {
-    tag "$name"
-    label 'process_medium'
-    publishDir "${params.outdir}/fastqc", mode: params.publish_dir_mode,
-        saveAs: { filename ->
-                      filename.indexOf(".zip") > 0 ? "zips/$filename" : "$filename"
-                }
+process generate_cool{
+   tag "$sample"
+   label 'process_medium'
+   publishDir "${params.outdir}/export/cool", mode: params.publish_dir_mode
 
-    input:
-    set val(name), file(reads) from ch_read_files_fastqc
+   when:
+   !params.skip_cool
 
-    output:
-    file "*_fastqc.{zip,html}" into ch_fastqc_results
+   input:
+   set val(sample), file(vpairs) from all_valid_pairs_4cool
+   file chrsize from chromosome_size_cool.collect()
 
-    script:
-    """
-    fastqc --quiet --threads $task.cpus $reads
-    """
+   output:
+   file("*mcool") into cool_maps
+
+   script:
+   """
+   hicpro2higlass.sh -p ${task.cpus} -i $vpairs -r 5000 -c ${chrsize} -n
+   """
 }
 
+
 /*
- * STEP 2 - MultiQC
+ * STEP 6 - MultiQC
  */
 process multiqc {
-    publishDir "${params.outdir}/MultiQC", mode: params.publish_dir_mode
+   label 'process_low'
+   publishDir "${params.outdir}/MultiQC", mode: params.publish_dir_mode
 
-    input:
-    file (multiqc_config) from ch_multiqc_config
-    file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([])
-    // TODO nf-core: Add in log files from your new processes for MultiQC to find!
-    file ('fastqc/*') from ch_fastqc_results.collect().ifEmpty([])
-    file ('software_versions/*') from ch_software_versions_yaml.collect()
-    file workflow_summary from ch_workflow_summary.collectFile(name: "workflow_summary_mqc.yaml")
-
-    output:
-    file "*multiqc_report.html" into ch_multiqc_report
-    file "*_data"
-    file "multiqc_plots"
-
-    script:
-    rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
-    rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''
-    custom_config_file = params.multiqc_config ? "--config $mqc_custom_config" : ''
-    // TODO nf-core: Specify which MultiQC modules to use with -m for a faster run time
-    """
-    multiqc -f $rtitle $rfilename $custom_config_file .
-    """
+   when:
+   !params.skip_multiqc
+
+   input:
+   file multiqc_config from ch_multiqc_config
+   file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([])
+   file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
+   file ('software_versions/*') from software_versions_yaml
+   file workflow_summary from create_workflow_summary(summary)
+
+   output:
+   file "*multiqc_report.html" into multiqc_report
+   file "*_data"
+
+   script:
+   rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
+   rfilename = custom_runName ? "--filename " + custom_runName.replaceAll('\\W','_').replaceAll('_+','_') + "_multiqc_report" : ''
+   """
+   multiqc -f $rtitle $rfilename --config $multiqc_config .
+   """
 }
 
 /*
- * STEP 3 - Output Description HTML
+ * STEP 7 - Output Description HTML
  */
 process output_documentation {
     publishDir "${params.outdir}/pipeline_info", mode: params.publish_dir_mode
@@ -261,18 +875,19 @@ process output_documentation {
     file output_docs from ch_output_docs
     file images from ch_output_docs_images
 
-    output:
-    file "results_description.html"
+   output:
+   file "results_description.html"
 
-    script:
-    """
-    markdown_to_html.py $output_docs -o results_description.html
-    """
+   script:
+   """
+   markdown_to_html.py $output_docs -o results_description.html
+   """
 }
 
 /*
  * Completion e-mail notification
  */
+
 workflow.onComplete {
 
     // Set up the e-mail variables
@@ -303,7 +918,7 @@ workflow.onComplete {
     email_fields['summary']['Nextflow Build'] = workflow.nextflow.build
     email_fields['summary']['Nextflow Compile Timestamp'] = workflow.nextflow.timestamp
 
-    // TODO nf-core: If not using MultiQC, strip out this code (including params.max_multiqc_email_size)
+    // If not using MultiQC, strip out this code (including params.max_multiqc_email_size)
     // On success try attach the multiqc report
     def mqc_report = null
     try {
diff --git a/nextflow.config b/nextflow.config
index d96244a3a8c4626a53960deea6df647e8b005311..bd5235b8ae579131bbc8dde2a62f6c3d044aa476 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -9,16 +9,52 @@
 params {
 
   // Workflow flags
-  // TODO nf-core: Specify your pipeline's command line flags
   genome = false
   input = "data/*{1,2}.fastq.gz"
   single_end = false
+
   outdir = './results'
+  // NOTE: 'genome = false' is already initialised a few lines above; duplicate assignment removed
+  input_paths = false
+  split_fastq = false
+  fastq_chunks_size = 20000000
+  chromosome_size = false
+  restriction_fragments = false
+  skip_maps = false
+  skip_ice = false
+  skip_cool = false
+  skip_multiqc = false
+  save_reference = false
+  save_interaction_bam = false
+  save_aligned_intermediates = false
+
+  bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+  bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+  min_mapq = 10
+
+  // Digestion Hi-C
+  restriction_site = 'A^AGCTT'
+  ligation_site = 'AAGCTAGCTT'
+  min_restriction_fragment_size = 0
+  max_restriction_fragment_size = 0
+  min_insert_size = 0
+  max_insert_size = 0
+  dnase = false
+  min_cis_dist = 0
+  rm_dup = true
+  rm_singleton = true
+  rm_multi = true
+  bin_size = '1000000,500000'
+  ice_max_iter = 100
+  ice_filer_low_count_perc = 0.02
+  ice_filer_high_count_perc =  0
+  ice_eps = 0.1
+  
   publish_dir_mode = 'copy'
 
   // Boilerplate options
-  name = false
   multiqc_config = false
+  name = false
   email = false
   email_on_fail = false
   max_multiqc_email_size = 25.MB
@@ -28,6 +64,7 @@ params {
   igenomes_base = 's3://ngi-igenomes/igenomes/'
   tracedir = "${params.outdir}/pipeline_info"
   igenomes_ignore = false
+
   custom_config_version = 'master'
   custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}"
   hostnames = false
@@ -36,10 +73,9 @@ params {
   config_profile_url = false
 
   // Defaults only, expecting to be overwritten
-  max_memory = 128.GB
-  max_cpus = 16
+  max_memory = 24.GB
+  max_cpus = 8
   max_time = 240.h
-
 }
 
 // Container slug. Stable releases should specify release tag!
@@ -56,6 +92,7 @@ try {
   System.err.println("WARNING: Could not load nf-core/config profiles: ${params.custom_config_base}/nfcore_custom.config")
 }
 
+// Create profiles
 profiles {
   conda { process.conda = "$projectDir/environment.yml" }
   debug { process.beforeScript = 'echo $HOSTNAME' }
diff --git a/nextflow_schema.json b/nextflow_schema.json
index 21e29d7e5fc226e377bef6fa7e4d9b498d281eed..84bb558fd02002db37df4755daa2dff36d09d89f 100644
--- a/nextflow_schema.json
+++ b/nextflow_schema.json
@@ -20,6 +20,23 @@
                     "description": "Input FastQ files.",
                     "help_text": "Use this to specify the location of your input FastQ files. For example:\n\n```bash\n--input 'path/to/data/sample_*_{1,2}.fastq'\n```\n\nPlease note the following requirements:\n\n1. The path must be enclosed in quotes\n2. The path must have at least one `*` wildcard character\n3. When using the pipeline with paired end data, the path must use `{1,2}` notation to specify read pairs.\n\nIf left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz`"
                 },
+                "input_paths": {
+                    "type": "string",
+                    "hidden": true,
+                    "description": "Input FastQ files for test only",
+                    "default": "undefined"
+                },
+                "split_fastq": {
+                    "type": "boolean",
+                    "description": "Split the reads into chunks before running the pipeline",
+                    "fa_icon": "fas fa-dna",
+                    "default": false
+                },
+                "fastq_chunks_size": {
+                    "type": "integer",
+                    "description": "Number of reads per chunk when split_fastq is used",
+                    "default": 20000000
+                },
                 "single_end": {
                     "type": "boolean",
                     "description": "Specifies that the input is single-end reads.",
@@ -72,6 +89,183 @@
                     "fa_icon": "fas fa-ban",
                     "hidden": true,
                     "help_text": "Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`."
+                },
+                "bwt2_index": {
+                    "type": "string",
+                    "description": "Full path to directory containing Bowtie index including base name. i.e. `/path/to/index/base`.",
+                    "fa_icon": "far fa-file-alt"
+                },
+                "chromosome_size": {
+                    "type": "string",
+                    "description": "Full path to file specifying chromosome sizes (tab separated with chromosome name and size).",
+                    "fa_icon": "far fa-file-alt",
+                    "help_text": "If not specified, the pipeline will build this file from the reference genome file"
+                },
+                "restriction_fragments": {
+                    "type": "string",
+                    "description": "Full path to restriction fragment (bed) file.",
+                    "fa_icon": "far fa-file-alt",
+                    "help_text": "This file depends on the Hi-C protocols and digestion strategy. If not provided, the pipeline will build it using the --restriction_site option"
+                },
+                "save_reference": {
+                    "type": "boolean",
+                    "description": "If generated by the pipeline save the annotation and indexes in the results directory.",
+                    "help_text": "Use this parameter to save all annotations to your results folder. These can then be used for future pipeline runs, reducing processing times.",
+                    "fa_icon": "fas fa-save"
+                }
+            }
+        },
+        "data_processing_options": {
+            "title": "Data processing",
+            "type": "object",
+            "description": "Parameters for Hi-C data processing",
+            "default": "",
+            "fa_icon": "fas fa-bahai",
+            "properties": {
+                "dnase": {
+                    "type": "boolean",
+                    "description": "For Hi-C protocols which are not based on enzyme digestion such as DNase Hi-C"
+                },
+                "restriction_site": {
+                    "type": "string",
+                    "default": "'A^AGCTT'",
+                    "description": "Restriction motifs used during digestion. Several motifs (comma separated) can be provided."
+                },
+                "ligation_site": {
+                    "type": "string",
+                    "default": "'AAGCTAGCTT'",
+                    "description": "Expected motif after DNA ligation.  Several motifs (comma separated) can be provided."
+                },
+                "rm_dup": {
+                    "type": "boolean",
+                    "description": "Remove duplicates",
+                    "default": true
+                },
+                "rm_multi": {
+                    "type": "boolean",
+                    "description": "Remove multi-mapped reads",
+                    "default": true
+                },
+                "rm_singleton": {
+                    "type": "boolean",
+                    "description": "Remove singleton",
+                    "default": true
+                },
+                "min_mapq": {
+                    "type": "integer",
+                    "default": 10,
+                    "description": "Keep aligned reads with a minimum quality value"
+                },
+                "bwt2_opts_end2end": {
+                    "type": "string",
+                    "default": "'--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
+                    "description": "Option for end-to-end bowtie mapping"
+                },
+                "bwt2_opts_trimmed": {
+                    "type": "string",
+                    "default": "'--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'",
+                    "description": "Option for trimmed reads mapping"
+                },
+                "save_interaction_bam": {
+                    "type": "boolean",
+                    "description": "Save a BAM file where all reads are flagged by their interaction classes"
+                },
+                "save_aligned_intermediates": {
+                    "type": "boolean",
+                    "description": "Save all BAM files during two-steps mapping"
+                }
+            }
+        },
+        "contacts_calling_options": {
+            "title": "Contacts calling",
+            "type": "object",
+            "description": "Options to call significant interactions",
+            "default": "",
+            "fa_icon": "fas fa-signature",
+            "properties": {
+                "min_cis_dist": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Minimum distance between loci to consider. Useful for --dnase mode to remove spurious ligation products. Only values > 0 are considered"
+                },
+                "max_insert_size": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Maximum fragment size to consider. Only values > 0 are considered"
+                },
+                "min_insert_size": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Minimum fragment size to consider. Only values > 0 are considered"
+                },
+                "max_restriction_fragment_size": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Maximum restriction fragment size to consider. Only values > 0 are considered"
+                },
+                "min_restriction_fragment_size": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Minimum restriction fragment size to consider. Only values > 0 are considered"
+                }
+            }
+        },
+        "contact_maps_options": {
+            "title": "Contact maps",
+            "type": "object",
+            "description": "Options to build Hi-C contact maps",
+            "default": "",
+            "fa_icon": "fas fa-chess-board",
+            "properties": {
+                "bin_size": {
+                    "type": "string",
+                    "default": "'1000000,500000'",
+                    "description": "Resolution to build the maps (comma separated)"
+                },
+                "ice_filer_low_count_perc": {
+                    "type": "number",
+                    "default": 0.02,
+                    "description": "Filter low counts rows before normalization"
+                },
+                "ice_filer_high_count_perc": {
+                    "type": "integer",
+                    "default": 0,
+                    "description": "Filter high counts rows before normalization"
+                },
+                "ice_eps": {
+                    "type": "number",
+                    "default": 0.1,
+                    "description": "Threshold for ICE convergence"
+                },
+                "ice_max_iter": {
+                    "type": "integer",
+                    "default": 100,
+                    "description": "Maximum number of iterations for ICE normalization"
+                }
+            }
+        },
+        "skip_options": {
+            "title": "Skip options",
+            "type": "object",
+            "description": "Skip some steps of the pipeline",
+            "default": "",
+            "fa_icon": "fas fa-random",
+            "properties": {
+                "skip_maps": {
+                    "type": "boolean",
+                    "description": "Do not build contact maps"
+                },
+                "skip_ice": {
+                    "type": "boolean",
+                    "description": "Do not normalize contact maps"
+                },
+                "skip_cool": {
+                    "type": "boolean",
+                    "description": "Do not generate cooler file"
+                },
+                "skip_multiqc": {
+                    "type": "boolean",
+                    "description": "Do not generate MultiQC report"
                 }
             }
         },
@@ -246,6 +440,18 @@
         {
             "$ref": "#/definitions/reference_genome_options"
         },
+        {
+            "$ref": "#/definitions/data_processing_options"
+        },
+        {
+            "$ref": "#/definitions/contacts_calling_options"
+        },
+        {
+            "$ref": "#/definitions/contact_maps_options"
+        },
+        {
+            "$ref": "#/definitions/skip_options"
+        },
         {
             "$ref": "#/definitions/generic_options"
         },