diff --git a/conf/hicpro.config b/conf/hicpro.config
index d969960bcc10e78ace7187f521863dbbe8ea62b8..5d145a174926c5ca9a2279a0519adb955137cf57 100644
--- a/conf/hicpro.config
+++ b/conf/hicpro.config
@@ -13,15 +13,16 @@ params {
        splitFastq = false
        bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
        bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
-       min_mapq = 0       
+       min_mapq = 0
 
        // Digestion Hi-C
        restriction_site = 'A^AGCTT'
        ligation_site = 'AAGCTAGCTT'
-       min_restriction_fragment_size = 100
-       max_restriction_fragment_size = 100000
-       min_insert_size = 100
-       max_insert_size = 600
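+       // no default values; set these to enable restriction fragment / insert size filtering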
+       min_restriction_fragment_size = 
+       max_restriction_fragment_size = 
+       min_insert_size = 
+       max_insert_size = 
 
        // Hi-C Processing
        min_cis_dist = 
diff --git a/conf/test.config b/conf/test.config
index 1dd09920231bdd5da123402760d5c858d5d78b92..d8220d289a7dda6a9d134e2e1a14715a2d2c5c74 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -9,7 +9,7 @@
 
 params {
 
-  config_profile_name = 'Hi-C test data from Dixon et al. (2012)'
+  config_profile_name = 'Hi-C test data from Schalbetter et al. (2017)'
   config_profile_description = 'Minimal test dataset to check pipeline function'
 
   // Limit resources so that this can run on Travis
@@ -19,10 +19,13 @@ params {
   
   // Input data
   readPaths = [
-    ['SRR400264_00', ['https://github.com/nf-core/test-datasets/raw/hic/SRR400264_00_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/SRR400264_00_R2.fastq.gz']],
-    ['SRR400264_01', ['https://github.com/nf-core/test-datasets/raw/hic/SRR400264_01_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/SRR400264_01_R2.fastq.gz']]
-  ]
+    ['SRR4292758_00', ['https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R1.fastq.gz', 'https://github.com/nf-core/test-datasets/raw/hic/data/SRR4292758_00_R2.fastq.gz']]
+  ]
 
   // Annotations
-  genome = 'GRCh37'
+  fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
+
+  // Options
+  skip_cool = true
 }
+
diff --git a/main.nf b/main.nf
index 53798f4aeca7cc2266f93189294c5b4719566dae..8ac8bd57b2e2bb1b2674d2e0fb457a7ae935f877 100644
--- a/main.nf
+++ b/main.nf
@@ -33,11 +33,11 @@ def helpMessage() {
 
     Mandatory arguments:
       --reads				    Path to input data (must be surrounded with quotes)
-      --genome                       	    Name of iGenomes reference
       -profile                      	    Configuration profile to use. Can use multiple (comma separated)
                                     	    Available: conda, docker, singularity, awsbatch, test and more.
 
     References                      	    If not specified in the configuration file or you wish to overwrite any of the references.
+      --genome                              Name of iGenomes reference
       --bwt2_index                     	    Path to Bowtie2 index
       --fasta                       	    Path to Fasta reference
       --chromosome_size             	    Path to chromosome size file
@@ -72,6 +72,10 @@ def helpMessage() {
       --email                       	    Set this parameter to your e-mail address to get a summary e-mail with details of the run sent to you when the workflow exits
       -name                         	    Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic.
 
+    Step options:
+      --skip_cool			    Skip generation of cool files
+      --skip_multiqc			    Skip MultiQC
+
     AWSBatch options:
       --awsqueue			    The AWSBatch JobQueue that needs to be set when running on AWSBatch
       --awsregion                   	    The AWS Region for your AWS Batch job to run on
@@ -138,8 +142,7 @@ if (params.readPaths){
       .from( params.readPaths )
       .map { row -> [ row[0], [file(row[1][0]), file(row[1][1])]] }
       .separate( raw_reads, raw_reads_2 ) { a -> [tuple(a[0], a[1][0]), tuple(a[0], a[1][1])] }
-      .println()
-}else{
+} else {
 
    raw_reads = Channel.create()
    raw_reads_2 = Channel.create()
@@ -674,6 +677,9 @@ process generate_cool{
    tag "$sample"
    publishDir "${params.outdir}/export/cool", mode: 'copy'
 
+   when:
+      !params.skip_cool
+
    input:
       set val(sample), file(vpairs) from all_valid_pairs_4cool
       file chrsize from chromosome_size_cool.collect()
@@ -694,15 +700,18 @@ process generate_cool{
 process multiqc {
     publishDir "${params.outdir}/MultiQC", mode: 'copy'
 
+    when:
+       !params.skip_multiqc
+
     input:
-    file multiqc_config from ch_multiqc_config
-    file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
-    file ('software_versions/*') from software_versions_yaml
-    file workflow_summary from create_workflow_summary(summary)
+       file multiqc_config from ch_multiqc_config
+       file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
+       file ('software_versions/*') from software_versions_yaml
+       file workflow_summary from create_workflow_summary(summary)
 
     output:
-    file "*multiqc_report.html" into multiqc_report
-    file "*_data"
+       file "*multiqc_report.html" into multiqc_report
+       file "*_data"
 
     script:
     rtitle = custom_runName ? "--title \"$custom_runName\"" : ''
diff --git a/nextflow.config b/nextflow.config
index 0a460404f5d565a029330cc98c960fbaf0fb8fd2..c9f2203d63a5ee598b57039743f627e3a705e519 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -24,7 +24,9 @@ params {
   readPaths = false
   chromosome_size = false
   restriction_fragments = false
-
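+  // Step options: skip specific pipeline steps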
+  skip_cool = false
+  skip_multiqc = false
 
   // Boilerplate options
   name = false