diff --git a/.travis.yml b/.travis.yml index b3e7a99f81be8e2ce96c6be8f54801a093e76cc4..bc8037e185a6231eed8da45d830148cdce17f087 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ before_install: - docker pull nfcore/hic:dev # Fake the tag locally so that the pipeline runs properly # Looks weird when this is :dev to :dev, but makes sense when testing code for a release (:dev to :1.0.1) - - docker tag nfcore/hic:dev nfcore/hic:1.0.0 + - docker tag nfcore/hic:dev nfcore/hic:dev install: # Install Nextflow diff --git a/CHANGELOG.md b/CHANGELOG.md index 08f3ef05433140b43ec55ce8bc9b67298f5dc710..7142fd883baa866d8d117030139d6261ef11a2d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,26 @@ # nf-core/hic: Changelog +## v1.1dev + +* Support 'N' base motif in restriction/ligation sites +* Support multiple restriction enzymes/ligattion sites (comma separated) (#31) +* Add --saveInteractionBAM option +* Add DOI (#29) +* Fix bug for reads extension _1/_2 (#30) +* Update manual (#28) + ## v1.0 - 2019-05-06 -First version of nf-core Hi-C pipeline which is a Nextflow implementation of the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/). +First version of nf-core Hi-C pipeline which is a Nextflow implementation of +the [HiC-Pro pipeline](https://github.com/nservant/HiC-Pro/). Note that all HiC-Pro functionalities are not yet all implemented. -The current version supports most protocols including Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C or HiChip data. +The current version supports most protocols including Hi-C, in situ Hi-C, +DNase Hi-C, Micro-C, capture-C or HiChip data. In summary, this version allows : -* Automatic detection and generation of annotation files based on igenomes if not provided. +* Automatic detection and generation of annotation files based on igenomes +if not provided. * Two-steps alignment of raw sequencing reads * Reads filtering and detection of valid interaction products * Generation of raw contact matrices for a set of resolutions diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 09226d0d8d896bbc3bdb632476430d6cad4b0aa7..a977481246a45a03d3b03a99439e7dc1d4d3b7f1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,11 +2,17 @@ ## Our Pledge -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project +and our community a harassment-free experience for everyone, regardless of +age, body size, disability, ethnicity, gender identity and expression, level +of experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. 
## Our Standards -Examples of behavior that contributes to creating a positive environment include: +Examples of behavior that contributes to creating a positive environment +include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences @@ -16,31 +22,55 @@ Examples of behavior that contributes to creating a positive environment include Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery and unwelcome sexual attention or advances +* The use of sexualized language or imagery and unwelcome sexual attention +or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting ## Our Responsibilities -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. ## Scope -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team on [Slack](https://nf-core-invite.herokuapp.com/). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. 
The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team on +[Slack](https://nf-core-invite.herokuapp.com/). The project team will review +and investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details +of specific enforcement policies may be posted separately. -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. ## Attribution -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ diff --git a/Dockerfile b/Dockerfile index 06374cf95db6f1a3a68e8c45ab48d2d3ac1d2c2f..bd89e7eed206414d6257f30f7ed2ebd91fb23971 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,4 +7,4 @@ RUN apt-get update && apt-get install -y gcc g++ && apt-get clean -y COPY environment.yml / RUN conda env create -f /environment.yml && conda clean -a -ENV PATH /opt/conda/envs/nf-core-hic-1.0.0/bin:$PATH +ENV PATH /opt/conda/envs/nf-core-hic-1.1.0dev/bin:$PATH diff --git a/README.md b/README.md index 37692cf5d7e8901feeb77f29da73ad500ca46cc4..2227cb44fc2348a16c7b48a509eb9dafb2aa3a54 100644 --- a/README.md +++ b/README.md @@ -7,19 +7,31 @@ [](http://bioconda.github.io/) [](https://hub.docker.com/r/nfcore/hic) - + -### Introduction -This pipeline is based on the [HiC-Pro workflow](https://github.com/nservant/HiC-Pro). -It was designed to process Hi-C data from raw fastq files (paired-end Illumina data) to normalized contact maps. -The current version supports most protocols, including digestion protocols as well as protocols that do not require restriction enzymes such as DNase Hi-C. -In practice, this workflow was successfully applied to many data-sets including dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or HiChip data. +[](https://doi.org/10.5281/zenodo.2669513) -The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible. +## Introduction -### Pipeline summary -1. Mapping using a two steps strategy to rescue reads spanning the ligation sites (bowtie2) +This pipeline is based on the +[HiC-Pro workflow](https://github.com/nservant/HiC-Pro). +It was designed to process Hi-C data from raw fastq files (paired-end Illumina +data) to normalized contact maps. +The current version supports most protocols, including digestion protocols as +well as protocols that do not require restriction enzymes such as DNase Hi-C. 
+In practice, this workflow was successfully applied to many data-sets including +dilution Hi-C, in situ Hi-C, DNase Hi-C, Micro-C, capture-C, capture Hi-C or +HiChip data. + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool +to run tasks across multiple compute infrastructures in a very portable manner. +It comes with docker / singularity containers making installation trivial and +results highly reproducible. + +## Pipeline summary + +1. Mapping using a two steps strategy to rescue reads spanning the ligation +sites (bowtie2) 2. Detection of valid interaction products 3. Duplicates removal 4. Create genome-wide contact maps at various resolution @@ -27,17 +39,29 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool 6. Quality controls and report (MultiQC) 7. Addition export for visualisation and downstream analysis (cooler) -### Documentation -The nf-core/hic pipeline comes with documentation about the pipeline, found in the `docs/` directory: +## Documentation + +The nf-core/hic pipeline comes with documentation about the pipeline, found in +the `docs/` directory: 1. [Installation](docs/installation.md) 2. Pipeline configuration * [Local installation](docs/configuration/local.md) * [Adding your own system](docs/configuration/adding_your_own.md) - * [Reference genomes](docs/configuration/reference_genomes.md) + * [Reference genomes](docs/configuration/reference_genomes.md) 3. [Running the pipeline](docs/usage.md) 4. [Output and how to interpret the results](docs/output.md) 5. [Troubleshooting](docs/troubleshooting.md) -### Credits +## Credits + nf-core/hic was originally written by Nicolas Servant. + +If you use nf-core/hic for your analysis, please cite it using the following +doi: [10.5281/zenodo.2669513](https://doi.org/10.5281/zenodo.2669513) + +You can cite the `nf-core` pre-print as follows: +Ewels PA, Peltzer A, Fillinger S, Alneberg JA, Patel H, Wilm A, Garcia MU, Di +Tommaso P, Nahnsen S. **nf-core: Community curated bioinformatics pipelines**. +*bioRxiv*. 2019. p. 610741. +[doi: 10.1101/610741](https://www.biorxiv.org/content/10.1101/610741v1). diff --git a/bin/digest_genome.py b/bin/digest_genome.py index db2d151602269c22f3b0a837446bba30a4b442b6..ac6d8da3d9f6faa3e9c0960fbecac52ea30da61a 100755 --- a/bin/digest_genome.py +++ b/bin/digest_genome.py @@ -47,6 +47,7 @@ def find_re_sites(filename, sequences, offset): indices.sort() all_indices.append(indices) indices = [] + # This is a new chromosome. 
Empty the sequence string, and add the # correct chrom id big_str = "" @@ -67,6 +68,7 @@ def find_re_sites(filename, sequences, offset): for m in re.finditer(pattern, big_str)] indices.sort() all_indices.append(indices) + return contig_names, all_indices @@ -87,6 +89,22 @@ def find_chromsomose_lengths(reference_filename): return chromosome_names, np.array(chromosome_lengths) +def replaceN(cs): + npos = int(cs.find('N')) + cseql = [] + if npos!= -1: + for nuc in ["A","C","G","T"]: + tmp = cs.replace('N', nuc, 1) + tmpl = replaceN(tmp) + if type(tmpl)==list: + cseql = cseql + tmpl + else: + cseql.append(tmpl) + else: + cseql.append(cs) + return cseql + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('fastafile') @@ -102,8 +120,13 @@ if __name__ == "__main__": filename = args.fastafile out = args.out - cutsites = args.res_sites - + + # Split restriction sites if comma-separated + cutsites=[] + for s in args.res_sites: + for m in s.split(','): + cutsites.append(m) + # process args and get restriction enzyme sequences sequences = [] offset = [] @@ -112,15 +135,34 @@ if __name__ == "__main__": cseq = ''.join(RE_cutsite[cs.lower()]) else: cseq = cs + offpos = int(cseq.find('^')) if offpos == -1: print "Unable to detect offset for", cseq print "Please, use '^' to specified the cutting position,", print "i.e A^GATCT for HindIII digestion" sys.exit(-1) + + for nuc in list(set(cs)): + if nuc != 'A' and nuc != 'C' and nuc != 'G' and nuc != 'T' and nuc != 'N' and nuc != '^': + print "Find unexpected character ['",nuc,"']in restriction motif" + print "Note that multiple motifs should be separated by a space (not a comma !)" + sys.exit(-1) + offset.append(offpos) sequences.append(re.sub('\^', '', cseq)) + # replace all N in restriction motif + sequences_without_N = [] + offset_without_N = [] + for rs in range(len(sequences)): + nrs = replaceN(sequences[rs]) + sequences_without_N = sequences_without_N + nrs + offset_without_N = offset_without_N + [offset[rs]] * len(nrs) + + sequences = sequences_without_N + offset = offset_without_N + if out is None: out = os.path.splitext(filename)[0] + "_fragments.bed" @@ -129,8 +171,7 @@ if __name__ == "__main__": print "Offset(s)", ','.join(str(x) for x in offset) # Read fasta file and look for rs per chromosome - contig_names, all_indices = find_re_sites(filename, sequences, - offset=offset) + contig_names, all_indices = find_re_sites(filename, sequences, offset=offset) _, lengths = find_chromsomose_lengths(filename) valid_fragments = [] diff --git a/bin/mapped_2hic_fragments.py b/bin/mapped_2hic_fragments.py index efa32e6d681c6fc1cbf60843bc588f1371de30f1..d4790ee3114a071cff3131543159d9124bbab1c6 100755 --- a/bin/mapped_2hic_fragments.py +++ b/bin/mapped_2hic_fragments.py @@ -53,7 +53,7 @@ def get_args(): "minInsertSize=", "maxInsertSize", "minFragSize", "maxFragSize", "minDist", - "gatg", "samOut", "verbose", "all", "help"]) + "gatg", "sam", "verbose", "all", "help"]) except getopt.GetoptError: usage() sys.exit(-1) @@ -442,7 +442,7 @@ def get_interaction_type(read1, read1_chrom, resfrag1, read2, # If returned InteractionType=None -> Same restriction fragment # and same strand = Dump interactionType = None - + if not read1.is_unmapped and not read2.is_unmapped and resfrag1 is not None and resfrag2 is not None: # same restriction fragment if resfrag1 == resfrag2: @@ -501,9 +501,9 @@ if __name__ == "__main__": minInsertSize = arg elif opt in ("-l", "--longestInsertSize"): maxInsertSize = arg - elif opt in ("-t", 
"--shortestFragmentSize"): + elif opt in ("-t", "--shortestFragmentLength"): minFragSize = arg - elif opt in ("-m", "--longestFragmentSize"): + elif opt in ("-m", "--longestFragmentLength"): maxFragSize = arg elif opt in ("-d", "--minCisDist"): minDist = arg @@ -613,6 +613,7 @@ if __name__ == "__main__": r2_chrom = None if r1_resfrag is not None or r2_resfrag is not None: + interactionType = get_interaction_type(r1, r1_chrom, r1_resfrag, r2, r2_chrom, r2_resfrag, verbose) dist = get_PE_fragment_size(r1, r2, r1_resfrag, r2_resfrag, interactionType) cdist = get_cis_dist(r1, r2) @@ -724,10 +725,14 @@ if __name__ == "__main__": if or1_resfrag is not None: or1_fragname = or1_resfrag.value['name'] - + else: + or1_fragname = 'None' + if or2_resfrag is not None: or2_fragname = or2_resfrag.value['name'] - + else: + or2_fragname = 'None' + cur_handler.write( or1.qname + "\t" + or1_chrom + "\t" + diff --git a/conf/hicpro.config b/conf/hicpro.config index 0a2c9b9e0db09f4f9861ba353b84a534820aba38..01b755a955c5aee521a6cf43b00847cfbc8d0cd3 100644 --- a/conf/hicpro.config +++ b/conf/hicpro.config @@ -38,5 +38,6 @@ params { saveReference = false saveAlignedIntermediates = false + saveInteractionBAM = false } diff --git a/docs/installation.md b/docs/installation.md index 9ac66d585871d374c90df1f14b2c192f2d24b7a8..c3dc01893dd774c1cf5925ddbf441a26fdc24f93 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -14,7 +14,9 @@ To start using the nf-core/hic pipeline, follow the steps below: 4. [Reference genomes](#4-reference-genomes) ## 1) Install NextFlow -Nextflow runs on most POSIX systems (Linux, Mac OSX etc). It can be installed by running the following commands: + +Nextflow runs on most POSIX systems (Linux, Mac OSX etc). It can be installed +by running the following commands: ```bash # Make sure that Java v8+ is installed: @@ -29,15 +31,21 @@ mv nextflow ~/bin/ # sudo mv nextflow /usr/local/bin ``` -See [nextflow.io](https://www.nextflow.io/) for further instructions on how to install and configure Nextflow. +See [nextflow.io](https://www.nextflow.io/) for further instructions on how to +install and configure Nextflow. ## 2) Install the pipeline -#### 2.1) Automatic -This pipeline itself needs no installation - NextFlow will automatically fetch it from GitHub if `nf-core/hic` is specified as the pipeline name. +### 2.1) Automatic + +This pipeline itself needs no installation - NextFlow will automatically fetch +it from GitHub if `nf-core/hic` is specified as the pipeline name. + +### 2.2) Offline -#### 2.2) Offline -The above method requires an internet connection so that Nextflow can download the pipeline files. If you're running on a system that has no internet connection, you'll need to download and transfer the pipeline files manually: +The above method requires an internet connection so that Nextflow can download +the pipeline files. 
If you're running on a system that has no internet +connection, you'll need to download and transfer the pipeline files manually: ```bash wget https://github.com/nf-core/hic/archive/master.zip @@ -47,61 +55,91 @@ cd ~/my_data/ nextflow run ~/my-pipelines/nf-core/hic-master ``` -To stop nextflow from looking for updates online, you can tell it to run in offline mode by specifying the following environment variable in your ~/.bashrc file: +To stop nextflow from looking for updates online, you can tell it to run in +offline mode by specifying the following environment variable in your +~/.bashrc file: ```bash export NXF_OFFLINE='TRUE' ``` -#### 2.3) Development - -If you would like to make changes to the pipeline, it's best to make a fork on GitHub and then clone the files. Once cloned you can run the pipeline directly as above. +### 2.3) Development +If you would like to make changes to the pipeline, it's best to make a fork on +GitHub and then clone the files. Once cloned you can run the pipeline directly +as above. ## 3) Pipeline configuration -By default, the pipeline loads a basic server configuration [`conf/base.config`](../conf/base.config) -This uses a number of sensible defaults for process requirements and is suitable for running -on a simple (if powerful!) local server. + +By default, the pipeline loads a basic server configuration +[`conf/base.config`](../conf/base.config) +This uses a number of sensible defaults for process requirements and is +suitable for running on a simple (if powerful!) local server. Be warned of two important points about this default configuration: 1. The default profile uses the `local` executor - * All jobs are run in the login session. If you're using a simple server, this may be fine. If you're using a compute cluster, this is bad as all jobs will run on the head node. - * See the [nextflow docs](https://www.nextflow.io/docs/latest/executor.html) for information about running with other hardware backends. Most job scheduler systems are natively supported. -2. Nextflow will expect all software to be installed and available on the `PATH` - * It's expected to use an additional config profile for docker, singularity or conda support. See below. - -#### 3.1) Software deps: Docker -First, install docker on your system: [Docker Installation Instructions](https://docs.docker.com/engine/installation/) - -Then, running the pipeline with the option `-profile docker` tells Nextflow to enable Docker for this run. An image containing all of the software requirements will be automatically fetched and used from [dockerhub](https://hub.docker.com/r/nfcore/hic). - -#### 3.1) Software deps: Singularity -If you're not able to use Docker then [Singularity](http://singularity.lbl.gov/) is a great alternative. -The process is very similar: running the pipeline with the option `-profile singularity` tells Nextflow to enable singularity for this run. An image containing all of the software requirements will be automatically fetched and used from singularity hub. - -If running offline with Singularity, you'll need to download and transfer the Singularity image first: + * All jobs are run in the login session. If you're using a simple server, +this may be fine. If you're using a compute cluster, this is bad as all jobs +will run on the head node. + * See the +[nextflow docs](https://www.nextflow.io/docs/latest/executor.html) for +information about running with other hardware backends. Most job scheduler +systems are natively supported. +2. 
Nextflow will expect all software to be installed and available on the +`PATH` + * It's expected to use an additional config profile for docker, singularity +or conda support. See below. + +### 3.1) Software deps: Docker + +First, install docker on your system: +[Docker Installation Instructions](https://docs.docker.com/engine/installation/) + +Then, running the pipeline with the option `-profile docker` tells Nextflow to +enable Docker for this run. An image containing all of the software +requirements will be automatically fetched and used from +[dockerhub](https://hub.docker.com/r/nfcore/hic). + +### 3.1) Software deps: Singularity + +If you're not able to use Docker then +[Singularity](http://singularity.lbl.gov/) is a great alternative. +The process is very similar: running the pipeline with the option +`-profile singularity` tells Nextflow to enable singularity for this run. +An image containing all of the software requirements will be automatically +fetched and used from singularity hub. + +If running offline with Singularity, you'll need to download and transfer the +Singularity image first: ```bash singularity pull --name nf-core-hic.simg shub://nf-core/hic ``` -Once transferred, use `-with-singularity` and specify the path to the image file: +Once transferred, use `-with-singularity` and specify the path to the image +file: ```bash nextflow run /path/to/nf-core-hic -with-singularity nf-core-hic.simg ``` -Remember to pull updated versions of the singularity image if you update the pipeline. +Remember to pull updated versions of the singularity image if you update the +pipeline. +### 3.2) Software deps: conda -#### 3.2) Software deps: conda -If you're not able to use Docker _or_ Singularity, you can instead use conda to manage the software requirements. -This is slower and less reproducible than the above, but is still better than having to install all requirements yourself! -The pipeline ships with a conda environment file and nextflow has built-in support for this. -To use it first ensure that you have conda installed (we recommend [miniconda](https://conda.io/miniconda.html)), then follow the same pattern as above and use the flag `-profile conda` +If you're not able to use Docker _or_ Singularity, you can instead use conda to +manage the software requirements. +This is slower and less reproducible than the above, but is still better than +having to install all requirements yourself! +The pipeline ships with a conda environment file and nextflow has built-in +support for this. +To use it first ensure that you have conda installed (we recommend +[miniconda](https://conda.io/miniconda.html)), then follow the same pattern +as above and use the flag `-profile conda` -#### 3.3) Configuration profiles +### 3.3) Configuration profiles See [`docs/configuration/adding_your_own.md`](configuration/adding_your_own.md) diff --git a/docs/output.md b/docs/output.md index 53c9c0c7c20b11e85acd758e4f7b157116ef2378..a83d0dae9b5a742b799f055163dd7dde2da77712 100644 --- a/docs/output.md +++ b/docs/output.md @@ -1,8 +1,11 @@ # nf-core/hic: Output -This document describes the output produced by the pipeline. Most of the plots are taken from the MultiQC report, which summarises results at the end of the pipeline. +This document describes the output produced by the pipeline. Most of the plots +are taken from the MultiQC report, which summarises results at the end of the +pipeline. 
 ## Pipeline overview
+
 The pipeline is built using [Nextflow](https://www.nextflow.io/)
 and processes data using the following steps:
 
@@ -10,27 +13,38 @@ and processes data using the following steps:
 * [Valid pairs detection](#valid-pairs-detection)
 * [Duplicates removal](#duplicates-removal)
 * [Contact maps](#contact-maps)
-* [MultiQC](#multiqc) - aggregate report and quality controls, describing results of the whole pipeline
-* [Export](#exprot) - additionnal export for compatibility with downstream analysis tool and visualization
+* [MultiQC](#multiqc) - aggregate report and quality controls, describing
+results of the whole pipeline
+* [Export](#exprot) - additional export for compatibility with downstream
+analysis tools and visualization
 
-The current version is mainly based on the [HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
-For details about the workflow, see [Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
+The current version is mainly based on the
+[HiC-Pro](https://github.com/nservant/HiC-Pro) pipeline.
+For details about the workflow, see
+[Servant et al. 2015](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-015-0831-x)
 
 ## Reads alignment
 
-Using Hi-C data, each reads mate has to be independantly aligned on the reference genome.
-The current workflow implements a two steps mapping strategy. First, the reads are aligned using an end-to-end aligner.
-Second, reads spanning the ligation junction are trimmmed from their 3' end, and aligned back on the genome.
-Aligned reads for both fragment mates are then paired in a single paired-end BAM file.
-Singletons are discarded, and multi-hits are filtered according to the configuration parameters (`--rm-multi`).
-Note that if the `--dnase` mode is activated, HiC-Pro will skip the second mapping step.
+Using Hi-C data, each read mate has to be independently aligned to the
+reference genome.
+The current workflow implements a two-step mapping strategy. First, the reads
+are aligned using an end-to-end aligner.
+Second, reads spanning the ligation junction are trimmed from their 3' end,
+and aligned back to the genome.
+Aligned reads for both fragment mates are then paired in a single paired-end
+BAM file.
+Singletons are discarded, and multi-hits are filtered according to the
+configuration parameters (`--rm_multi`).
+Note that if the `--dnase` mode is activated, HiC-Pro will skip the second
+mapping step.
 
 **Output directory: `results/mapping`**
 
 * `*bwt2pairs.bam` - final BAM file with aligned paired data
 * `*.pairstat` - mapping statistics
 
-if `--saveAlignedIntermediates` is specified, additional mapping file results are available ;
+If `--saveAlignedIntermediates` is specified, additional mapping file results
+are available:
 
 * `*.bam` - Aligned reads (R1 and R2) from end-to-end alignment
 * `*_unmap.fastq` - Unmapped reads after end-to-end alignment
@@ -39,68 +53,117 @@ if `--saveAlignedIntermediates` is specified, additional mapping file results ar
 * `*bwt2merged.bam` - merged BAM file after the two-steps alignment
 * `*.mapstat` - mapping statistics per read mate
 
-Usually, a high fraction of reads is expected to be aligned on the genome (80-90%). Among them, we usually observed a few percent (around 10%) of step 2 aligned reads. Those reads are chimeric fragments for which we detect a ligation junction. An abnormal level of chimeric reads can reflect a ligation issue during the library preparation.
-The fraction of singleton or multi-hits depends on the genome complexity and the fraction of unmapped reads. The fraction of singleton is usually close to the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the same pair were unmapped.
+Usually, a high fraction of reads is expected to be aligned to the genome
+(80-90%). Among them, we usually observe a few percent (around 10%) of step 2
+aligned reads. Those reads are chimeric fragments for which we detect a
+ligation junction. An abnormal level of chimeric reads can reflect a ligation
+issue during the library preparation.
+The fraction of singletons or multi-hits depends on the genome complexity and
+the fraction of unmapped reads. The fraction of singletons is usually close to
+the sum of unmapped R1 and R2 reads, as it is unlikely that both mates from the
+same pair were unmapped.
 
 ## Valid pairs detection
 
-Each aligned reads can be assigned to one restriction fragment according to the reference genome and the digestion protocol.
+Each aligned read can be assigned to one restriction fragment according to the
+reference genome and the digestion protocol.
 
 Invalid pairs are classified as follow:
-* Dangling end, i.e. unligated fragments (both reads mapped on the same restriction fragment)
-* Self circles, i.e. fragments ligated on themselves (both reads mapped on the same restriction fragment in inverted orientation)
-* Religation, i.e. ligation of juxtaposed fragments
-* Filtered pairs, i.e. any pairs that do not match the filtering criteria on inserts size, restriction fragments size
-* Dumped pairs, i.e. any pairs for which we were not able to reconstruct the ligation product.
-Only valid pairs involving two different restriction fragments are used to build the contact maps.
-Duplicated valid pairs associated to PCR artefacts are discarded (see `--rm_dup`.
-In case of Hi-C protocols that do not require a restriction enzyme such as DNase Hi-C or micro Hi-C, the assignment to a restriction is not possible (see `--dnase`).
-Short range interactions that are likely to be spurious ligation products can thus be discarded using the `--min_cis_dist` parameter.
+* Dangling end, i.e. unligated fragments (both reads mapped on the same
+restriction fragment)
+* Self circles, i.e. fragments ligated on themselves (both reads mapped on the
+same restriction fragment in inverted orientation)
+* Religation, i.e. ligation of juxtaposed fragments
+* Filtered pairs, i.e. any pairs that do not match the filtering criteria on
+insert size or restriction fragment size
+* Dumped pairs, i.e. any pairs for which we were not able to reconstruct the
+ligation product.
+
+Only valid pairs involving two different restriction fragments are used to
+build the contact maps.
+Duplicated valid pairs associated with PCR artefacts are discarded
+(see `--rm_dup`).
+
+In case of Hi-C protocols that do not require a restriction enzyme such as
+DNase Hi-C or micro Hi-C, the assignment to a restriction fragment is not
+possible (see `--dnase`).
+Short range interactions that are likely to be spurious ligation products
+can thus be discarded using the `--min_cis_dist` parameter.
 * `*.validPairs` - List of valid ligation products
+* `*.DEpairs` - List of dangling-end products
+* `*.SCPairs` - List of self-circle products
+* `*.REPairs` - List of religation products
+* `*.FiltPairs` - List of filtered pairs
 * `*RSstat` - Statitics of number of read pairs falling in each category
 
 The validPairs are stored using a simple tab-delimited text format ;
 
 ```bash
-read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 / strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1 / mapping qual R2 [/ allele_specific_tag]
+read name / chr_reads1 / pos_reads1 / strand_reads1 / chr_reads2 / pos_reads2 /
+strand_reads2 / fragment_size / res frag name R1 / res frag R2 / mapping qual R1
+/ mapping qual R2 [/ allele_specific_tag]
 ```
 
-The ligation efficiency can be assessed using the filtering of valid and invalid pairs. As the ligation is a random process, 25% of each valid ligation class is expected. In the same way, a high level of dangling-end or self-circle read pairs is associated with a low quality experiment, and reveals a problem during the digestion, fill-in or ligation steps.
+The ligation efficiency can be assessed using the filtering of valid and
+invalid pairs. As the ligation is a random process, 25% of each valid ligation
+class is expected. In the same way, a high level of dangling-end or self-circle
+read pairs is associated with a low quality experiment, and reveals a problem
+during the digestion, fill-in or ligation steps.
 
-In the context of Hi-C protocol without restriction enzyme, this analysis step is skipped. The aligned pairs are therefore directly used to generate the contact maps. A filter of the short range contact (typically <1kb) is recommanded as this pairs are likely to be self ligation products.
+In the context of Hi-C protocols without a restriction enzyme, this analysis
+step is skipped. The aligned pairs are therefore directly used to generate the
+contact maps. A filter on short range contacts (typically <1kb) is
+recommended as these pairs are likely to be self-ligation products.
 
 ## Duplicates removal
 
 Note that validPairs file are generated per reads chunck.
-These files are then merged in the allValidPairs file, and duplicates are removed if the `--rm_dup` parameter is used.
+These files are then merged in the allValidPairs file, and duplicates are
+removed if the `--rm_dup` parameter is used.
 
 * `*allValidPairs` - combined valid pairs from all read chunks
 * `*mergestat` - statistics about duplicates removal and valid pairs information
 
-Additional quality controls such as fragment size distribution can be extracted from the list of valid interaction products.
-We usually expect to see a distribution centered around 300 pb which correspond to the paired-end insert size commonly used.
-The fraction of dplicates is also presented. A high level of duplication indicates a poor molecular complexity and a potential PCR bias.
-Finaly, an important metric is to look at the fraction of intra and inter-chromosomal interactions, as well as long range (>20kb) versus short range (<20kb) intra-chromosomal interactions.
+Additional quality controls such as fragment size distribution can be extracted
+from the list of valid interaction products.
+We usually expect to see a distribution centered around 300 bp which corresponds
+to the paired-end insert size commonly used.
+The fraction of duplicates is also presented. A high level of duplication
+indicates a poor molecular complexity and a potential PCR bias.
+Finally, an important metric is to look at the fraction of intra and
+inter-chromosomal interactions, as well as long range (>20kb) versus short
+range (<20kb) intra-chromosomal interactions.
 
 ## Contact maps
 
 Intra et inter-chromosomal contact maps are build for all specified resolutions.
-The genome is splitted into bins of equal size. Each valid interaction is associated with the genomic bins to generate the raw maps.
-In addition, Hi-C data can contain several sources of biases which has to be corrected.
-The current workflow uses the [ìced](https://github.com/hiclib/iced) and [Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286) python package which proposes a fast implementation of the original ICE normalization algorithm (Imakaev et al. 2012), making the assumption of equal visibility of each fragment.
+The genome is split into bins of equal size. Each valid interaction is
+associated with the genomic bins to generate the raw maps.
+In addition, Hi-C data can contain several sources of biases which have to be
+corrected.
+The current workflow uses the [iced](https://github.com/hiclib/iced) Python
+package ([Varoquaux and Servant, 2018](http://joss.theoj.org/papers/10.21105/joss.01286)),
+which provides a fast implementation of the original ICE
+normalization algorithm (Imakaev et al. 2012), making the assumption of equal
+visibility of each fragment.
 
 * `*.matrix` - genome-wide contact maps
 * `*_iced.matrix` - genome-wide iced contact maps
 
-The contact maps are generated for all specified resolution (see `--bin_size` argument)
+The contact maps are generated for all specified resolutions
+(see `--bin_size` argument)
 
 A contact map is defined by :
+
 * A list of genomic intervals related to the specified resolution (BED format).
 * A matrix, stored as standard triplet sparse format (i.e. list format).
 
-Based on the observation that a contact map is symmetric and usually sparse, only non-zero values are stored for half of the matrix. The user can specified if the 'upper', 'lower' or 'complete' matrix has to be stored. The 'asis' option allows to store the contacts as they are observed from the valid pairs files.
+Based on the observation that a contact map is symmetric and usually sparse,
+only non-zero values are stored for half of the matrix. The user can specify
+whether the 'upper', 'lower' or 'complete' matrix has to be stored. The 'asis'
+option stores the contacts as they are observed in the valid pairs
+files.
 
 ```bash
 A B 10
@@ -109,19 +172,27 @@ Based on the observation that a contact map is symmetric and usually sparse, onl
 (...)
 ```
 
-This format is memory efficient, and is compatible with several software for downstream analysis.
+This format is memory efficient, and is compatible with several software tools
+for downstream analysis.
 
 ## MultiQC
 
-[MultiQC](http://multiqc.info) is a visualisation tool that generates a single HTML report summarising all samples in your project. Most of the pipeline QC results are visualised in the report and further statistics are available in within the report data directory.
+[MultiQC](http://multiqc.info) is a visualisation tool that generates a single
+HTML report summarising all samples in your project. Most of the pipeline QC
+results are visualised in the report and further statistics are available
+within the report data directory.
 
-The pipeline has special steps which allow the software versions used to be reported in the MultiQC output for future traceability.
+The pipeline has special steps which allow the software versions used to be +reported in the MultiQC output for future traceability. **Output directory: `results/multiqc`** * `Project_multiqc_report.html` - * MultiQC report - a standalone HTML file that can be viewed in your web browser + * MultiQC report - a standalone HTML file that can be viewed in your +web browser * `Project_multiqc_data/` - * Directory containing parsed statistics from the different tools used in the pipeline + * Directory containing parsed statistics from the different tools used +in the pipeline -For more information about how to use MultiQC reports, see [http://multiqc.info](http://multiqc.info) +For more information about how to use MultiQC reports, see +[http://multiqc.info](http://multiqc.info) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index e0f2d0774afa327390d3e3cb33c7c3b1e6c829fb..df43e8a755881b646991815e75e159069972c459 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -2,11 +2,14 @@ ## Input files not found -If only no file, only one input file , or only read one and not read two is picked up then something is wrong with your input file declaration +If only no file, only one input file , or only read one and not read two is +picked up then something is wrong with your input file declaration 1. The path must be enclosed in quotes (`'` or `"`) -2. The path must have at least one `*` wildcard character. This is even if you are only running one paired end sample. -3. When using the pipeline with paired end data, the path must use `{1,2}` or `{R1,R2}` notation to specify read pairs. +2. The path must have at least one `*` wildcard character. This is even if +you are only running one paired end sample. +3. When using the pipeline with paired end data, the path must use `{1,2}` or +`{R1,R2}` notation to specify read pairs. 4. If you are running Single end data make sure to specify `--singleEnd` If the pipeline can't find your files then you will get the following error @@ -15,14 +18,26 @@ If the pipeline can't find your files then you will get the following error ERROR ~ Cannot find any reads matching: *{1,2}.fastq.gz ``` -Note that if your sample name is "messy" then you have to be very particular with your glob specification. A file name like `L1-1-D-2h_S1_L002_R1_001.fastq.gz` can be difficult enough for a human to read. Specifying `*{1,2}*.gz` wont work give you what you want Whilst `*{R1,R2}*.gz` will. - +Note that if your sample name is "messy" then you have to be very particular +with your glob specification. A file name like +`L1-1-D-2h_S1_L002_R1_001.fastq.gz` can be difficult enough for a human to +read. Specifying `*{1,2}*.gz` wont work whilst `*{R1,R2}*.gz` will. ## Data organization -The pipeline can't take a list of multiple input files - it takes a glob expression. If your input files are scattered in different paths then we recommend that you generate a directory with symlinked files. If running in paired end mode please make sure that your files are sensibly named so that they can be properly paired. See the previous point. + +The pipeline can't take a list of multiple input files - it takes a glob +expression. If your input files are scattered in different paths then we +recommend that you generate a directory with symlinked files. If running +in paired end mode please make sure that your files are sensibly named so +that they can be properly paired. See the previous point. 
## Extra resources and getting help -If you still have an issue with running the pipeline then feel free to contact us. -Have a look at the [pipeline website](https://github.com/nf-core/hic) to find out how. -If you have problems that are related to Nextflow and not our pipeline then check out the [Nextflow gitter channel](https://gitter.im/nextflow-io/nextflow) or the [google group](https://groups.google.com/forum/#!forum/nextflow). +If you still have an issue with running the pipeline then feel free to +contact us. +Have a look at the [pipeline website](https://github.com/nf-core/hic) to +find out how. + +If you have problems that are related to Nextflow and not our pipeline then +check out the [Nextflow gitter channel](https://gitter.im/nextflow-io/nextflow) +or the [google group](https://groups.google.com/forum/#!forum/nextflow). diff --git a/docs/usage.md b/docs/usage.md index d166cf6cf2c345c72af52e9c56d436302d6e8e1d..57f1e3edb293ca20830c41c145f06eb03c5e5d30 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -51,6 +51,7 @@ * [`--splitFastq`](#--splitFastq) * [`--saveReference`](#--saveReference) * [`--saveAlignedIntermediates`](#--saveAlignedIntermediates) + * [`--saveInteractionBAM`](#--saveInteractionBAM) * [Skip options](#skip-options) * [--skip_maps](#--skip_maps) * [--skip_ice](#--skip_ice) @@ -76,24 +77,32 @@ * [`--plaintext_email`](#--plaintext_email) * [`--multiqc_config`](#--multiqc_config) - ## General Nextflow info -Nextflow handles job submissions on SLURM or other environments, and supervises running the jobs. Thus the Nextflow process must run until the pipeline is finished. We recommend that you put the process running in the background through `screen` / `tmux` or similar tool. Alternatively you can run nextflow within a cluster job submitted your job scheduler. -It is recommended to limit the Nextflow Java virtual machines memory. We recommend adding the following line to your environment (typically in `~/.bashrc` or `~./bash_profile`): +Nextflow handles job submissions on SLURM or other environments, and supervises +running the jobs. Thus the Nextflow process must run until the pipeline is +finished. We recommend that you put the process running in the background +through `screen` / `tmux` or similar tool. Alternatively you can run nextflow +within a cluster job submitted your job scheduler. + +It is recommended to limit the Nextflow Java virtual machines memory. +We recommend adding the following line to your environment (typically +in `~/.bashrc` or `~./bash_profile`): ```bash NXF_OPTS='-Xms1g -Xmx4g' ``` ## Running the pipeline + The typical command for running the pipeline is as follows: ```bash -nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' -genome GRCh37 -profile docker +nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' --genome GRCh37 -profile docker ``` -This will launch the pipeline with the `docker` configuration profile. See below for more information about profiles. +This will launch the pipeline with the `docker` configuration profile. +See below for more information about profiles. Note that the pipeline will create the following files in your working directory: @@ -105,26 +114,46 @@ results # Finished results (configurable, see below) ``` ### Updating the pipeline -When you run the above command, Nextflow automatically pulls the pipeline code from GitHub and stores it as a cached version. When running the pipeline after this, it will always use the cached version if available - even if the pipeline has been updated since. 
To make sure that you're running the latest version of the pipeline, make sure that you regularly update the cached version of the pipeline: + +When you run the above command, Nextflow automatically pulls the pipeline code +from GitHub and stores it as a cached version. When running the pipeline after +this, it will always use the cached version if available - even if the pipeline +has been updated since. To make sure that you're running the latest version of +the pipeline, make sure that you regularly update the cached version of the +pipeline: ```bash nextflow pull nf-core/hic ``` ### Reproducibility -It's a good idea to specify a pipeline version when running the pipeline on your data. This ensures that a specific version of the pipeline code and software are used when you run your pipeline. If you keep using the same tag, you'll be running the same version of the pipeline, even if there have been changes to the code since. -First, go to the [nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find the latest version number - numeric only (eg. `1.3.1`). Then specify this when running the pipeline with `-r` (one hyphen) - eg. `-r 1.3.1`. +It's a good idea to specify a pipeline version when running the pipeline on +your data. This ensures that a specific version of the pipeline code and +software are used when you run your pipeline. If you keep using the same tag, +you'll be running the same version of the pipeline, even if there have been +changes to the code since. -This version number will be logged in reports when you run the pipeline, so that you'll know what you used when you look back in the future. +First, go to the +[nf-core/hic releases page](https://github.com/nf-core/hic/releases) and find +the latest version number - numeric only (eg. `1.3.1`). +Then specify this when running the pipeline with `-r` (one hyphen) +eg. `-r 1.3.1`. +This version number will be logged in reports when you run the pipeline, so +that you'll know what you used when you look back in the future. ## Main arguments ### `-profile` -Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. Note that multiple profiles can be loaded, for example: `-profile docker` - the order of arguments is important! -If `-profile` is not specified at all the pipeline will be run locally and expects all software to be installed and available on the `PATH`. +Use this parameter to choose a configuration profile. Profiles can give +configuration presets for different compute environments. Note that multiple +profiles can be loaded, for example: `-profile docker` - the order of arguments +is important! + +If `-profile` is not specified at all the pipeline will be run locally and +expects all software to be installed and available on the `PATH`. * `awsbatch` * A generic configuration profile to be used with AWS Batch. @@ -142,6 +171,7 @@ If `-profile` is not specified at all the pipeline will be run locally and expec * Includes links to test data so needs no other parameters ### `--reads` + Use this to specify the location of your input FastQ files. For example: ```bash @@ -152,18 +182,26 @@ Please note the following requirements: 1. The path must be enclosed in quotes 2. The path must have at least one `*` wildcard character -3. When using the pipeline with paired end data, the path must use `{1,2}` notation to specify read pairs. +3. When using the pipeline with paired end data, the path must use `{1,2}` +notation to specify read pairs. 
If left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz` ## Reference genomes and annotation files -The pipeline config files come bundled with paths to the illumina iGenomes reference index files. If running with docker or AWS, the configuration is set up to use the [AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) resource. +The pipeline config files come bundled with paths to the illumina iGenomes +reference index files. If running with docker or AWS, the configuration is +set up to use the [AWS-iGenomes](https://ewels.github.io/AWS-iGenomes/) +resource. ### `--genome` (using iGenomes) -There are 31 different species supported in the iGenomes references. To run the pipeline, you must specify which to use with the `--genome` flag. -You can find the keys to specify the genomes in the [iGenomes config file](../conf/igenomes.config). Common genomes that are supported are: +There are 31 different species supported in the iGenomes references. To run +the pipeline, you must specify which to use with the `--genome` flag. + +You can find the keys to specify the genomes in the +[iGenomes config file](../conf/igenomes.config). +Common genomes that are supported are: * Human * `--genome GRCh37` @@ -176,11 +214,13 @@ You can find the keys to specify the genomes in the [iGenomes config file](../co > There are numerous others - check the config file for more. -Note that you can use the same configuration setup to save sets of reference files for your own use, even if they are not part of the iGenomes resource. See the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) for instructions on where to save such a file. +Note that you can use the same configuration setup to save sets of reference +files for your own use, even if they are not part of the iGenomes resource. +See the [Nextflow documentation](https://www.nextflow.io/docs/latest/config.html) +for instructions on where to save such a file. The syntax for this reference configuration is as follows: - ```nextflow params { genomes { @@ -194,18 +234,26 @@ params { ``` ### `--fasta` -If you prefer, you can specify the full path to your reference genome when you run the pipeline: + +If you prefer, you can specify the full path to your reference genome when you +run the pipeline: ```bash --fasta '[path to Fasta reference]' ``` ### `--igenomesIgnore` -Do not load `igenomes.config` when running the pipeline. You may choose this option if you observe clashes between custom parameters and those supplied in `igenomes.config`. + +Do not load `igenomes.config` when running the pipeline. You may choose this +option if you observe clashes between custom parameters and those supplied +in `igenomes.config`. ### `--bwt2_index` -The bowtie2 indexes are required to run the Hi-C pipeline. If the `--bwt2_index` is not specified, the pipeline will either use the igenome bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly (see `--fasta` option) +The bowtie2 indexes are required to run the Hi-C pipeline. If the +`--bwt2_index` is not specified, the pipeline will either use the igenome +bowtie2 indexes (see `--genome` option) or build the indexes on-the-fly +(see `--fasta` option) ```bash --bwt2_index '[path to bowtie2 index (with basename)]' @@ -213,8 +261,10 @@ The bowtie2 indexes are required to run the Hi-C pipeline. If the `--bwt2_index` ### `--chromosome_size` -The Hi-C pipeline will also requires a two-columns text file with the chromosome name and its size (tab separated). 
-If not specified, this file will be automatically created by the pipeline. In the latter case, the `--fasta` reference genome has to be specified.
+The Hi-C pipeline also requires a two-column text file with the
+chromosome name and its size (tab separated).
+If not specified, this file will be automatically created by the pipeline.
+In the latter case, the `--fasta` reference genome has to be specified.
 
 ```bash
 chr1 249250621
@@ -236,7 +286,8 @@ If not specified, this file will be automatically created by the pipeline. In th
 
 ### `--restriction_fragments`
 
-Finally, Hi-C experiments based on restriction enzyme digestion requires a BED file with coordinates of restriction fragments.
+Finally, Hi-C experiments based on restriction enzyme digestion require a BED
+file with coordinates of restriction fragments.
 
 ```bash
 chr1 0 16007 HIC_chr1_1 0 +
@@ -252,22 +303,30 @@ Finally, Hi-C experiments based on restriction enzyme digestion requires a BED f
 (...)
 ```
 
-If not specified, this file will be automatically created by the pipline. In this case, the `--fasta` reference genome will be used.
+If not specified, this file will be automatically created by the pipeline.
+In this case, the `--fasta` reference genome will be used.
 Note that the `--restriction_site` parameter is mandatory to create this file.
 
 ## Hi-C specific options
 
-The following options are defined in the `hicpro.config` file, and can be updated either using a custom configuration file (see `-c` option) or using command line parameter.
+The following options are defined in the `hicpro.config` file, and can be
+updated either using a custom configuration file (see `-c` option) or using
+command line parameters.
 
 ### Reads mapping
 
-The reads mapping is currently based on the two-steps strategy implemented in the HiC-pro pipeline. The idea is to first align reads from end-to-end.
-Reads that do not aligned are then trimmed at the ligation site, and their 5' end is re-aligned to the reference genome.
-Note that the default option are quite stringent, and can be updated according to the reads quality or the reference genome.
+The reads mapping is currently based on the two-step strategy implemented in
+the HiC-Pro pipeline. The idea is to first align reads end-to-end.
+Reads that do not align are then trimmed at the ligation site, and their 5'
+end is re-aligned to the reference genome.
+Note that the default options are quite stringent, and can be updated according
+to the read quality or the reference genome.
 
 #### `--bwt2_opts_end2end`
 
-Bowtie2 alignment option for end-to-end mapping. Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+Bowtie2 alignment options for end-to-end mapping.
+Default: '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
 
 ```bash
 --bwt2_opts_end2end '[Options for bowtie2 step1 mapping on full reads]'
@@ -275,7 +334,9 @@ Bowtie2 alignment option for end-to-end mapping. Default: '--very-sensitive -L 3
 
 #### `--bwt2_opts_trimmed`
 
-Bowtie2 alignment option for trimmed reads mapping (step 2). Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+Bowtie2 alignment options for trimmed reads mapping (step 2).
+Default: '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end
+--reorder'
 
 ```bash
 --bwt2_opts_trimmed '[Options for bowtie2 step2 mapping on trimmed reads]'
@@ -293,15 +354,20 @@ Minimum mapping quality. Reads with lower quality are discarded.
@@ -293,15 +354,20 @@ Minimum mapping quality. Reads with lower quality are discarded.
Default: 10

#### `--restriction_site`

-Restriction motif(s) for Hi-C digestion protocol. The restriction motif(s) is(are) used to generate the list of restriction fragments.
-The precise cutting site of the restriction enzyme has to be specified using the '^' character. Default: 'A^AGCTT'
+Restriction motif(s) for the Hi-C digestion protocol. The restriction motif(s)
+is(are) used to generate the list of restriction fragments.
+The precise cutting site of the restriction enzyme has to be specified using
+the '^' character. Default: 'A^AGCTT'
Here are a few examples:
-* MboI: '^GATC'
-* DpnII: '^GATC'
-* BglII: 'A^GATCT'
-* HindIII: 'A^AGCTT'
-Note that multiples restriction motifs can be provided (comma-separated).
+* MboI: ^GATC
+* DpnII: ^GATC
+* BglII: A^GATCT
+* HindIII: A^AGCTT
+* ARIMA kit: ^GATC,^GANT
+
+Note that multiple restriction motifs can be provided (comma-separated) and
+that the 'N' base is supported.

```bash
--restriction_site '[Cutting motif]'
@@ -309,16 +375,22 @@ Note that multiples restriction motifs can be provided (comma-separated).

#### `--ligation_site`

-Ligation motif after reads ligation. This motif is used for reads trimming and depends on the fill in strategy.
-Note that multiple ligation sites can be specified. Default: 'AAGCTAGCTT'
+Ligation motif after read ligation. This motif is used for read trimming and
+depends on the fill-in strategy.
+Note that multiple ligation sites can be specified (comma-separated) and that
+the 'N' base is interpreted and replaced by 'A', 'C', 'G' or 'T'.
+Default: 'AAGCTAGCTT'

```bash
--ligation_site '[Ligation motif]'
```

+Example for the ARIMA kit: GATCGATC,GATCGANT,GANTGATC,GANTGANT
+
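Putting the two motif parameters together for the ARIMA kit (using exactly the motifs listed above):

```bash
# ARIMA kit motifs as listed in the sections above
--restriction_site '^GATC,^GANT' --ligation_site 'GATCGATC,GATCGANT,GANTGATC,GANTGANT'
```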
#### `--min_restriction_fragment_size`

-Minimum size of restriction fragments to consider for the Hi-C processing. Default: ''
+Minimum size of restriction fragments to consider for the Hi-C processing.
+Default: ''

```bash
--min_restriction_fragment_size '[numeric]'
@@ -326,7 +398,8 @@ Minimum size of restriction fragments to consider for the Hi-C processing. Defau

#### `--max_restriction_fragment_size`

-Maximum size of restriction fragments to consider for the Hi-C processing. Default: ''
+Maximum size of restriction fragments to consider for the Hi-C processing.
+Default: ''

```bash
--max_restriction_fragment_size '[numeric]'
@@ -334,7 +407,8 @@ Maximum size of restriction fragments to consider for the Hi-C processing. Defau

#### `--min_insert_size`

-Minimum reads insert size. Shorter 3C products are discarded. Default: ''
+Minimum read insert size. Shorter 3C products are discarded.
+Default: ''

```bash
--min_insert_size '[numeric]'
@@ -342,7 +416,8 @@ Minimum reads insert size. Shorter 3C products are discarded. Default: ''

#### `--max_insert_size`

-Maximum reads insert size. Longer 3C products are discarded. Default: ''
+Maximum read insert size. Longer 3C products are discarded.
+Default: ''

```bash
--max_insert_size '[numeric]'
@@ -352,8 +427,10 @@ Maximum reads insert size. Longer 3C products are discarded. Default: ''

#### `--dnase`

-In DNAse Hi-C mode, all options related to digestion Hi-C (see previous section) are ignored.
-In this case, it is highly recommanded to use the `--min_cis_dist` parameter to remove spurious ligation products.
+In DNase Hi-C mode, all options related to digestion Hi-C
+(see previous section) are ignored.
+In this case, it is highly recommended to use the `--min_cis_dist` parameter
+to remove spurious ligation products.

```bash
--dnase
@@ -363,7 +440,8 @@ In this case, it is highly recommanded to use the `--min_cis_dist` parameter to

#### `--min_cis_dist`

-Filter short range contact below the specified distance. Mainly useful for DNase Hi-C. Default: ''
+Filter short-range contacts below the specified distance.
+Mainly useful for DNase Hi-C. Default: ''

```bash
--min_cis_dist '[numeric]'
@@ -387,7 +465,9 @@ If specified, duplicates reads are discarded before building contact maps.

#### `--rm_multi`

-If specified, reads that aligned multiple times on the genome are discarded. Note the default mapping options are based on random hit assignment, meaning that only one position is kept per read.
+If specified, reads that align multiple times on the genome are discarded.
+Note that the default mapping options are based on random hit assignment,
+meaning that only one position is kept per read.

```bash
--rm_multi
@@ -395,41 +475,46 @@ If specified, reads that aligned multiple times on the genome are discarded. Not

## Genome-wide contact maps

-#### `--bin_size`
+### `--bin_size`

-Resolution of contact maps to generate (space separated). Default:'1000000,500000'
+Resolution of contact maps to generate (comma separated).
+Default: '1000000,500000'

```bash
--bin_size '[numeric]'
```

-#### `--ice_max_iter`
+### `--ice_max_iter`

-Maximum number of iteration for ICE normalization. Default: 100
+Maximum number of iterations for ICE normalization.
+Default: 100

```bash
--ice_max_iter '[numeric]'
```

-#### `--ice_filer_low_count_perc`
+### `--ice_filter_low_count_perc`

-Define which pourcentage of bins with low counts should be force to zero. Default: 0.02
+Define the percentage of bins with low counts that should be forced to zero.
+Default: 0.02

```bash
--ice_filter_low_count_perc '[numeric]'
```

-#### `--ice_filer_high_count_perc`
+### `--ice_filter_high_count_perc`

-Define which pourcentage of bins with low counts should be discarded before normalization. Default: 0
+Define the percentage of bins with high counts that should be discarded before
+normalization. Default: 0

```bash
--ice_filter_high_count_perc '[numeric]'
```

-#### `--ice_eps`
+### `--ice_eps`

-The relative increment in the results before declaring convergence for ICE normalization. Default: 0.1
+The relative increment in the results before declaring convergence for ICE
+normalization. Default: 0.1

```bash
--ice_eps '[numeric]'
@@ -437,50 +522,64 @@ The relative increment in the results before declaring convergence for ICE norma

## Inputs/Outputs

-#### `--splitFastq`
+### `--splitFastq`

-By default, the nf-core Hi-C pipeline expects one read pairs per sample. However, for large Hi-C data processing single fastq files can be very time consuming.
-The `--splitFastq` option allows to automatically split input read pairs into chunks of reads. In this case, all chunks will be processed in parallel and merged before generating the contact maps, thus leading to a significant increase of processing performance.
+By default, the nf-core Hi-C pipeline expects one pair of FASTQ files per sample.
+However, for large Hi-C datasets, processing a single pair of FASTQ files can
+be very time consuming.
+The `--splitFastq` option automatically splits the input read pairs
+into chunks of reads. In this case, all chunks are processed in parallel
+and merged before generating the contact maps, thus leading to a significant
+increase in processing performance.

```bash
--splitFastq '[Number of reads per chunk]'
```
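For example, to split each input FASTQ file into chunks of 20 million reads (an arbitrary, illustrative value):

```bash
# 20000000 is an illustrative chunk size; choose a value suited to your data and cluster
--splitFastq 20000000
```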
-#### `--saveReference`
+### `--saveReference`

-If specified, annotation files automatically generated from the `--fasta` file are exported in the results folder. Default: false
+If specified, annotation files automatically generated from the `--fasta` file
+are exported in the results folder. Default: false

```bash
--saveReference
```

-#### `--saveAlignedIntermediates`
+### `--saveAlignedIntermediates`

-If specified, all intermediate mapping files are saved and exported in the results folder. Default: false
+If specified, all intermediate mapping files are saved and exported in the
+results folder. Default: false

```bash
--saveAlignedIntermediates
```

+### `--saveInteractionBAM`
+
+If specified, write a BAM file with all classified reads (valid pairs,
+dangling ends, self-circles, etc.) and their tags.
+
## Skip options

-#### `--skip_maps`
+### `--skip_maps`

-If defined, the workflow stops with the list of valid interactions, and the genome-wide maps are not built. Usefult for capture-C analysis. Default: false
+If defined, the workflow stops with the list of valid interactions, and the
+genome-wide maps are not built. Useful for capture-C analysis. Default: false

```bash
--skip_maps
```

-#### `--skip_ice`
+### `--skip_ice`

-If defined, the ICE normalization is not run on the raw contact maps. Default: false
+If defined, the ICE normalization is not run on the raw contact maps.
+Default: false

```bash
--skip_ice
```

-#### `--skip_cool`
+### `--skip_cool`

If defined, cooler files are not generated. Default: false
@@ -488,7 +587,7 @@ If defined, cooler files are not generated. Default: false
--skip_cool
```

-#### `--skip_multiqc`
+### `--skip_multiqc`

If defined, the MultiQC report is not generated. Default: false
@@ -497,48 +596,92 @@ If defined, the MultiQC report is not generated. Default: false
```

## Job resources
+
### Automatic resubmission

-Each step in the pipeline has a default set of requirements for number of CPUs, memory and time. For most of the steps in the pipeline, if the job exits with an error code of `143` (exceeded requested resources) it will automatically resubmit with higher requests (2 x original, then 3 x original). If it still fails after three times then the pipeline is stopped.
-### Custom resource requests
-Wherever process-specific requirements are set in the pipeline, the default value can be changed by creating a custom config file. See the files hosted at [`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf) for examples.
+Each step in the pipeline has a default set of requirements for number of CPUs,
+memory and time. For most of the steps in the pipeline, if the job exits with
+an error code of `143` (exceeded requested resources) it will automatically
+resubmit with higher requests (2 x original, then 3 x original). If it still
+fails after three times then the pipeline is stopped.
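These per-process defaults can also be overridden through a custom config file, as described in the next section. A minimal sketch is shown below; the process name and the values are hypothetical and should be adapted to your setup:

```nextflow
// 'bowtie2_end_to_end' is a hypothetical process name; check main.nf for the actual names
process {
  withName: 'bowtie2_end_to_end' {
    cpus = 8
    memory = 32.GB
    time = 24.h
  }
}
```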
-If you are likely to be running `nf-core` pipelines regularly it may be a good idea to request that your custom config file is uploaded to the `nf-core/configs` git repository. Before you do this please can you test that the config file works with your pipeline of choice using the `-c` parameter (see definition below). You can then create a pull request to the `nf-core/configs` repository with the addition of your config file, associated documentation file (see examples in [`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)), and amending [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config) to include your custom profile.
+### Custom resource requests
-If you have any questions or issues please send us a message on [`Slack`](https://nf-core-invite.herokuapp.com/).
+Wherever process-specific requirements are set in the pipeline, the default
+value can be changed by creating a custom config file.
+See the files hosted at
+[`nf-core/configs`](https://github.com/nf-core/configs/tree/master/conf)
+for examples.
+
+If you are likely to be running `nf-core` pipelines regularly it may be a good
+idea to request that your custom config file is uploaded to the
+`nf-core/configs` git repository. Before you do this, please test that
+the config file works with your pipeline of choice using the `-c` parameter
+(see definition below). You can then create a pull request to the
+`nf-core/configs` repository with the addition of your config file, associated
+documentation file (see examples in
+[`nf-core/configs/docs`](https://github.com/nf-core/configs/tree/master/docs)),
+and amend [`nfcore_custom.config`](https://github.com/nf-core/configs/blob/master/nfcore_custom.config)
+to include your custom profile.
+
+If you have any questions or issues please send us a message on
+[`Slack`](https://nf-core-invite.herokuapp.com/).

## AWS Batch specific parameters

-Running the pipeline on AWS Batch requires a couple of specific parameters to be set according to your AWS Batch configuration. Please use the `-awsbatch` profile and then specify all of the following parameters.
+
+Running the pipeline on AWS Batch requires a couple of specific parameters to
+be set according to your AWS Batch configuration. Please use the `awsbatch`
+profile and then specify all of the following parameters.
+
### `--awsqueue`
+
The JobQueue that you intend to use on AWS Batch.
+
### `--awsregion`

-The AWS region to run your job in. Default is set to `eu-west-1` but can be adjusted to your needs.
-Please make sure to also set the `-w/--work-dir` and `--outdir` parameters to a S3 storage bucket of your choice - you'll get an error message notifying you if you didn't.
+The AWS region to run your job in. Default is set to `eu-west-1` but can be
+adjusted to your needs.
+
+Please make sure to also set the `-w/--work-dir` and `--outdir` parameters to
+an S3 storage bucket of your choice - you'll get an error message notifying you
+if you didn't.

## Other command line parameters

### `--outdir`
+
The output directory where the results will be saved.

### `--email`

-Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. If set in your user config file (`~/.nextflow/config`) then you don't need to speicfy this on the command line for every run.
+
+Set this parameter to your e-mail address to get a summary e-mail with details
+of the run sent to you when the workflow exits. If set in your user config file
+(`~/.nextflow/config`) then you don't need to specify this on the command line
+for every run.

### `-name`

-Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic.
-This is used in the MultiQC report (if not default) and in the summary HTML / e-mail (always).
+Name for the pipeline run. If not specified, Nextflow will automatically generate
+a random mnemonic.
+
+This is used in the MultiQC report (if not default) and in the summary HTML /
+e-mail (always).

**NB:** Single hyphen (core Nextflow option)

### `-resume`

-Specify this when restarting a pipeline. Nextflow will used cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously.
-You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names.
+Specify this when restarting a pipeline. Nextflow will use cached results from
+any pipeline steps where the inputs are the same, continuing from where it got
+to previously.
+
+You can also supply a run name to resume a specific run: `-resume [run-name]`.
+Use the `nextflow log` command to show previous run names.

**NB:** Single hyphen (core Nextflow option)

### `-c`
+
Specify the path to a specific config file (this is a core NextFlow command).

**NB:** Single hyphen (core Nextflow option)
@@ -546,7 +689,10 @@ Specify the path to a specific config file (this is a core NextFlow command).
Note - you can use this to override pipeline defaults.

### `--custom_config_version`

-Provide git commit id for custom Institutional configs hosted at `nf-core/configs`. This was implemented for reproducibility purposes. Default is set to `master`.
+
+Provide git commit id for custom institutional configs hosted at
+`nf-core/configs`. This was implemented for reproducibility purposes.
+Default is set to `master`.

```bash
## Download and use config file with following git commit id
--custom_config_version d52db660777c4bf36546ddb188ec530c3ada1b96
```

### `--max_memory`
+
Use to set a top-limit for the default memory requirement for each process.
Should be a string in the format integer-unit. eg. `--max_memory '8.GB'`

### `--max_time`
+
Use to set a top-limit for the default time requirement for each process.
Should be a string in the format integer-unit. eg. `--max_time '2.h'`

### `--max_cpus`
+
Use to set a top-limit for the default CPU requirement for each process.
Should be a string in the format integer-unit. eg. `--max_cpus 1`

### `--plaintext_email`
+
Set to receive plain-text e-mails instead of HTML formatted.

### `--multiqc_config`
+
Specify a path to a custom MultiQC configuration file.
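For instance, the `--max_memory`, `--max_time` and `--max_cpus` limits described above can be combined on the command line; the values here are illustrative and should match your infrastructure:

```bash
# resource caps are illustrative values
nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' -profile docker \
    --max_memory '64.GB' --max_time '48.h' --max_cpus 16
```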
diff --git a/environment.yml b/environment.yml
index 34958b7d3505d0ad73b33fb325ab1f87a0d6f8a3..46dae6523bfe0a6eea9e354f978f77f143aa5fad 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,6 +1,6 @@
 # You can use this file to create a conda environment for this pipeline:
 # conda env create -f environment.yml
-name: nf-core-hic-1.0.0
+name: nf-core-hic-1.1.0dev
 channels:
   - conda-forge
   - bioconda
diff --git a/main.nf b/main.nf
index ce29fd5dfa679a2b2c404d553108150ae29cf233..31f73a8549e76b246fe05981c0c2735b5605090e 100644
--- a/main.nf
+++ b/main.nf
@@ -22,57 +22,60 @@ def helpMessage() {
    nextflow run nf-core/hic --reads '*_R{1,2}.fastq.gz' -profile conda

    Mandatory arguments:
-      --reads                      Path to input data (must be surrounded with quotes)
-      -profile                     Configuration profile to use. Can use multiple (comma separated)
-                                   Available: conda, docker, singularity, awsbatch, test and more.
-
-    References:                    If not specified in the configuration file or you wish to overwrite any of the references.
-      --genome                     Name of iGenomes reference
-      --bwt2_index                 Path to Bowtie2 index
-      --fasta                      Path to Fasta reference
-      --chromosome_size            Path to chromosome size file
-      --restriction_fragments      Path to restriction fragment file (bed)
+      --reads                      Path to input data (must be surrounded with quotes)
+      -profile                     Configuration profile to use. Can use multiple (comma separated)
+                                   Available: conda, docker, singularity, awsbatch, test and more.
+
+    References:                    If not specified in the configuration file or you wish to overwrite any of the references.
+      --genome                     Name of iGenomes reference
+      --bwt2_index                 Path to Bowtie2 index
+      --fasta                      Path to Fasta reference
+      --chromosome_size            Path to chromosome size file
+      --restriction_fragments      Path to restriction fragment file (bed)
+      --saveReference              Save reference genome to output folder. Default: False
+      --saveAlignedIntermediates   Save intermediate alignment files. Default: False

    Options:
-      --bwt2_opts_end2end          Options for bowtie2 end-to-end mappinf (first mapping step)
-      --bwt2_opts_trimmed          Options for bowtie2 mapping after ligation site trimming
-      --min_mapq                   Minimum mapping quality values to consider
-
-      --restriction_site           Cutting motif(s) of restriction enzyme(s) (comma separated)
-      --ligation_site              Ligation motifs to trim (comma separated)
-      --min_restriction_fragment_size  Minimum size of restriction fragments to consider
-      --max_restriction_framgnet_size  Maximum size of restriction fragmants to consider
-      --min_insert_size            Minimum insert size of mapped reads to consider
-      --max_insert_size            Maximum insert size of mapped reads to consider
-
-      --dnase                      Run DNase Hi-C mode. All options related to restriction fragments are not considered
-
-      --min_cis_dist               Minimum intra-chromosomal distance to consider
-      --rm_singleton               Remove singleton reads
-      --rm_multi                   Remove multi-mapped reads
-      --rm_dup                     Remove duplicates
-
-      --bin_size                   Bin size for contact maps (comma separated)
-      --ice_max_iter               Maximum number of iteration for ICE normalization
-      --ice_filter_low_count_perc  Percentage of low counts columns/rows to filter before ICE normalization
-      --ice_filter_high_count_perc Percentage of high counts columns/rows to filter before ICE normalization
-      --ice_eps                    Convergence criteria for ICE normalization
+      --bwt2_opts_end2end          Options for bowtie2 end-to-end mapping (first mapping step). See hic.config for default.
+      --bwt2_opts_trimmed          Options for bowtie2 mapping after ligation site trimming. See hic.config for default.
+      --min_mapq                   Minimum mapping quality values to consider. Default: 10
+      --restriction_site           Cutting motif(s) of restriction enzyme(s) (comma separated). Default: 'A^AGCTT'
+      --ligation_site              Ligation motifs to trim (comma separated). Default: 'AAGCTAGCTT'
+      --min_restriction_fragment_size  Minimum size of restriction fragments to consider. Default: None
+      --max_restriction_fragment_size  Maximum size of restriction fragments to consider. Default: None
+      --min_insert_size            Minimum insert size of mapped reads to consider. Default: None
+      --max_insert_size            Maximum insert size of mapped reads to consider. Default: None
+      --saveInteractionBAM         Save BAM file with interaction tags (dangling-end, self-circle, etc.). Default: False
+
+      --dnase                      Run DNase Hi-C mode. All options related to restriction fragments are not considered. Default: False
+
+      --min_cis_dist               Minimum intra-chromosomal distance to consider. Default: None
+      --rm_singleton               Remove singleton reads. Default: true
+      --rm_multi                   Remove multi-mapped reads. Default: true
+      --rm_dup                     Remove duplicates. Default: true
+
+      --bin_size                   Bin size for contact maps (comma separated). Default: '1000000,500000'
+      --ice_max_iter               Maximum number of iteration for ICE normalization. Default: 100
+      --ice_filter_low_count_perc  Percentage of low counts columns/rows to filter before ICE normalization. Default: 0.02
+      --ice_filter_high_count_perc Percentage of high counts columns/rows to filter before ICE normalization. Default: 0
+      --ice_eps                    Convergence criteria for ICE normalization. Default: 0.1

    Other options:
-      --splitFastq                 Size of read chuncks to use to speed up the workflow
-      --outdir                     The output directory where the results will be saved
-      --email                      Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits
-      -name                        Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic.
+      --splitFastq                 Size of read chunks to use to speed up the workflow. Default: None
+      --outdir                     The output directory where the results will be saved. Default: './results'
+      --email                      Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits. Default: None
+      -name                        Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. Default: None

    Step options:
-      --skip_maps                  Skip generation of contact maps. Useful for capture-C
-      --skip_ice                   Skip ICE normalization
-      --skip_cool                  Skip generation of cooler files
-      --skip_multiQC               Skip MultiQC
+
+      --skip_maps                  Skip generation of contact maps. Useful for capture-C. Default: False
+      --skip_ice                   Skip ICE normalization. Default: False
+      --skip_cool                  Skip generation of cool files. Default: False
+      --skip_multiQC               Skip MultiQC. Default: False

    AWSBatch options:
-      --awsqueue                   The AWSBatch JobQueue that needs to be set when running on AWSBatch
-      --awsregion                  The AWS Region for your AWS Batch job to run on
+      --awsqueue                   The AWSBatch JobQueue that needs to be set when running on AWSBatch
+      --awsregion                  The AWS Region for your AWS Batch job to run on
    """.stripIndent()
}
@@ -154,7 +157,7 @@ if ( params.splitFastq ){
      raw_reads_full = raw_reads.concat( raw_reads_2 )
      raw_reads = raw_reads_full.splitFastq( by: params.splitFastq , file: true)
 }else{
-   raw_reads = raw_reads.concat( raw_reads_2 )
+   raw_reads = raw_reads.concat( raw_reads_2 ).dump(tag: "data")
}
@@ -494,8 +497,8 @@ if (!params.dnase){
      set val(oname), file("${prefix}.mapstat") into all_mapstat
   script:
-      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/
-      tag = prefix.toString() =~/_R1|_val_1/ ? "R1" : "R2"
+      sample = prefix.toString() - ~/(_R1$|_R2$|_val_1$|_val_2$|_1$|_2$)/
+      tag = prefix.toString() =~/_R1$|_val_1$|_1$/ ? "R1" : "R2"
      oname = prefix.toString() - ~/(\.[0-9]+)$/
   """
@@ -535,8 +538,8 @@ if (!params.dnase){
      set val(oname), file("${prefix}.mapstat") into all_mapstat
   script:
-      sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/
-      tag = prefix.toString() =~/_R1|_val_1/ ? "R1" : "R2"
+      sample = prefix.toString() - ~/(_R1$|_R2$|_val_1$|_val_2$|_1$|_2$)/
+      tag = prefix.toString() =~/_R1$|_val_1$|_1$/ ? "R1" : "R2"
"R1" : "R2" oname = prefix.toString() - ~/(\.[0-9]+)$/ """ @@ -552,6 +555,7 @@ if (!params.dnase){ } } + process combine_mapped_files{ tag "$sample = $r1_prefix + $r2_prefix" publishDir "${params.outdir}/mapping", mode: 'copy', @@ -598,6 +602,10 @@ if (!params.dnase){ output: set val(sample), file("*.validPairs") into valid_pairs set val(sample), file("*.validPairs") into valid_pairs_4cool + set val(sample), file("*.DEPairs") into de_pairs + set val(sample), file("*.SCPairs") into sc_pairs + set val(sample), file("*.REPairs") into re_pairs + set val(sample), file("*.FiltPairs") into filt_pairs set val(sample), file("*RSstat") into all_rsstat script: @@ -611,9 +619,10 @@ if (!params.dnase){ if ("$params.max_insert_size".isInteger()) opts="${opts} -l ${params.max_insert_size}" if ("$params.min_restriction_fragment_size".isInteger()) opts="${opts} -t ${params.min_restriction_fragment_size}" if ("$params.max_restriction_fragment_size".isInteger()) opts="${opts} -m ${params.max_restriction_fragment_size}" + if (params.saveInteractionBAM) opts="${opts} --sam" """ - mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} ${opts} + mapped_2hic_fragments.py -f ${frag_file} -r ${pe_bam} --all ${opts} """ } } @@ -706,7 +715,7 @@ process merge_sample { file("mstats/") into all_mstats script: - sample = prefix.toString() - ~/(_R1|_R2|_val_1|_val_2)/ + sample = prefix.toString() - ~/(_R1$|_R2$|_val_1$|_val_2$|_1$|_2$)/ if ( (fstat =~ /.mapstat/) ){ ext = "mmapstat" } if ( (fstat =~ /.pairstat/) ){ ext = "mpairstat" } if ( (fstat =~ /.RSstat/) ){ ext = "mRSstat" } diff --git a/nextflow.config b/nextflow.config index 356f20058f0aa048851d0fd965078417564701b3..f624219755d1ca2dcb4accbdf8479ac09a4348f9 100644 --- a/nextflow.config +++ b/nextflow.config @@ -45,7 +45,7 @@ params { // Container slug. Stable releases should specify release tag! // Developmental code should specify :dev -process.container = 'nfcore/hic:1.0.0' +process.container = 'nfcore/hic:dev' // Load base.config by default for all pipelines includeConfig 'conf/base.config' @@ -102,7 +102,7 @@ manifest { description = 'Analysis of Chromosome Conformation Capture data (Hi-C)' mainScript = 'main.nf' nextflowVersion = '>=0.32.0' - version = '1.0.0' + version = '1.1.0dev' } // Function to ensure that resource requirements don't go beyond