diff --git a/CHANGELOG.md b/CHANGELOG.md
index 27746ee7adb1b77eef6c06d9b43ad51904213875..2b11e76b012ecea9c82731e2462d4515a0a41f86 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## v1.3.0dev
 
+* Change min_mapq behavior: read pairs are now discarded only when both mates have a MAPQ below min_mapq
 * Add HiCExplorer distance decay quality control
 * Add HiCExplorer TADs calling
 * Add insulation score TADs calling
diff --git a/bin/mergeSAM.py b/bin/mergeSAM.py
index 12917b16277a0a768269f611cd13422bccbe98a1..b40fd2363a97fe6ecb1cb11abc610648af1226b5 100755
--- a/bin/mergeSAM.py
+++ b/bin/mergeSAM.py
@@ -52,16 +52,16 @@ def get_args():
 
 
 def is_unique_bowtie2(read):
-	ret = False
-	if not read.is_unmapped and read.has_tag('AS'):
-		if read.has_tag('XS'):
-			primary =  read.get_tag('AS')
-			secondary = read.get_tag('XS')
-			if (primary > secondary):
-				ret = True
-		else:
-			ret = True
-	return ret
+    ret = False
+    if not read.is_unmapped and read.has_tag('AS'):
+        if read.has_tag('XS'):
+            primary =  read.get_tag('AS')
+            secondary = read.get_tag('XS')
+            if (primary > secondary):
+                ret = True
+        else:
+            ret = True
+    return ret
 
 ## Remove everything after "/" or " " in read's name
 def get_read_name(read):
@@ -71,249 +71,242 @@ def get_read_name(read):
 
 def sam_flag(read1, read2, hr1, hr2):
 	
-	f1 = read1.flag
-	f2 = read2.flag
-
-	if r1.is_unmapped == False:
-		r1_chrom = hr1.get_reference_name(r1.reference_id)
-	else:
-		r1_chrom = "*"
-	if r2.is_unmapped == False:
-		r2_chrom = hr2.get_reference_name(r2.reference_id)
-	else:
-		r2_chrom="*"
-
-
-  ##Relevant bitwise flags (flag in an 11-bit binary number)
-  ##1 The read is one of a pair
-  ##2 The alignment is one end of a proper paired-end alignment
-  ##4 The read has no reported alignments
-  ##8 The read is one of a pair and has no reported alignments
-  ##16 The alignment is to the reverse reference strand
-  ##32 The other mate in the paired-end alignment is aligned to the reverse reference strand
-  ##64 The read is the first (#1) mate in a pair
-  ##128 The read is the second (#2) mate in a pair
+    f1 = read1.flag
+    f2 = read2.flag
+
+    if read1.is_unmapped == False:
+        r1_chrom = hr1.get_reference_name(read1.reference_id)
+    else:
+        r1_chrom = "*"
+    if read2.is_unmapped == False:
+        r2_chrom = hr2.get_reference_name(read2.reference_id)
+    else:
+        r2_chrom = "*"
+
+    ##Relevant bitwise flags (the SAM FLAG field is a combination of the bit values below)
+    ##1 The read is one of a pair
+    ##2 The alignment is one end of a proper paired-end alignment
+    ##4 The read has no reported alignments
+    ##8 The read is one of a pair and has no reported alignments
+    ##16 The alignment is to the reverse reference strand
+    ##32 The other mate in the paired-end alignment is aligned to the reverse reference strand
+    ##64 The read is the first (#1) mate in a pair
+    ##128 The read is the second (#2) mate in a pair
   
-  ##The reads were mapped as single-end data, so should expect flags of 
-  ##0 (map to the '+' strand) or 16 (map to the '-' strand)
-  ##Output example: a paired-end read that aligns to the reverse strand 
-  ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
+    ##The reads were mapped as single-end data, so we expect flags of
+    ##0 (map to the '+' strand) or 16 (map to the '-' strand)
+    ##Output example: a paired-end read that aligns to the reverse strand 
+    ##and is the first mate in the pair will have flag 83 (= 64 + 16 + 2 + 1)
   
-	if f1 & 0x4:
-		f1 = f1 | 0x8
+    if f1 & 0x4:
+        f1 = f1 | 0x8
 
-	if f2 & 0x4:
-		f2 = f2 | 0x8
+    if f2 & 0x4:
+        f2 = f2 | 0x8
     
-	if (not (f1 & 0x4) and not (f2 & 0x4)):
-    ##The flag should now indicate this is paired-end data
-		f1 = f1 | 0x1
-		f1 = f1 | 0x2
-		f2 = f2 | 0x1
-		f2 = f2 | 0x2
-  
+    if (not (f1 & 0x4) and not (f2 & 0x4)):
+        ##The flag should now indicate this is paired-end data
+        f1 = f1 | 0x1
+        f1 = f1 | 0x2
+        f2 = f2 | 0x1
+        f2 = f2 | 0x2
     
-  ##Indicate if the pair is on the reverse strand
-	if f1 & 0x10:
-		f2 = f2 | 0x20
+    ##If one mate maps to the reverse strand, set the mate-reverse flag (0x20) on the other
+    if f1 & 0x10:
+        f2 = f2 | 0x20
   
-	if f2 & 0x10:
-		f1 = f1 | 0x20
+    if f2 & 0x10:
+        f1 = f1 | 0x20
   
-  ##Is this first or the second pair?
-	f1 = f1 | 0x40
-	f2 = f2 | 0x80
+    ##Mark read1 as the first mate (0x40) and read2 as the second mate (0x80) of the pair
+    f1 = f1 | 0x40
+    f2 = f2 | 0x80
   
     ##Insert the modified bitwise flags into the reads
-	read1.flag = f1
-	read2.flag = f2
+    read1.flag = f1
+    read2.flag = f2
 	
-	##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
-	#RNEXT
-	if r1_chrom == r2_chrom:
-		read1.next_reference_id = r1.reference_id
-		read2.next_reference_id = r1.reference_id
-	else:
-		read1.next_reference_id = r2.reference_id
-		read2.next_reference_id = r1.reference_id
-   	#PNEXT
-	read1.next_reference_start = read2.reference_start
-	read2.next_reference_start = read1.reference_start
+    ##Determine the RNEXT and PNEXT values (i.e. the positional values of a read's pair)
+    #RNEXT
+    if r1_chrom == r2_chrom:
+        read1.next_reference_id = read1.reference_id
+        read2.next_reference_id = read1.reference_id
+    else:
+        read1.next_reference_id = read2.reference_id
+        read2.next_reference_id = read1.reference_id
+    #PNEXT
+    read1.next_reference_start = read2.reference_start
+    read2.next_reference_start = read1.reference_start
 
-	return(read1, read2)
+    return(read1, read2)
 
 
 
 if __name__ == "__main__":
     ## Read command line arguments
-	opts = get_args()
-	inputFile = None
-	outputFile = None
-	mapq = None
-	report_single = False
-	report_multi = False
-	verbose = False
-	stat = False
-	output = "-"
-
-	if len(opts) == 0:
-		usage()
-		sys.exit()
-
-	for opt, arg in opts:
-		if opt in ("-h", "--help"):
-			usage()
-			sys.exit()
-		elif opt in ("-f", "--forward"):
-			R1file = arg
-		elif opt in ("-r", "--reverse"):
-			R2file = arg
-		elif opt in ("-o", "--output"):
-			output = arg
-		elif opt in ("-q", "--qual"):
-			mapq = arg
-		elif opt in ("-s", "--single"):
-			report_single = True
-		elif opt in ("-m", "--multi"):
-			report_multi = True
-		elif opt in ("-t", "--stat"):
-			stat = True
-		elif opt in ("-v", "--verbose"):
-			verbose = True
-		else:
-			assert False, "unhandled option"
+    opts = get_args()
+    inputFile = None
+    outputFile = None
+    mapq = None
+    report_single = False
+    report_multi = False
+    verbose = False
+    stat = False
+    output = "-"
+
+    if len(opts) == 0:
+        usage()
+        sys.exit()
+
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            usage()
+            sys.exit()
+        elif opt in ("-f", "--forward"):
+            R1file = arg
+        elif opt in ("-r", "--reverse"):
+            R2file = arg
+        elif opt in ("-o", "--output"):
+            output = arg
+        elif opt in ("-q", "--qual"):
+            mapq = arg
+        elif opt in ("-s", "--single"):
+            report_single = True
+        elif opt in ("-m", "--multi"):
+            report_multi = True
+        elif opt in ("-t", "--stat"):
+            stat = True
+        elif opt in ("-v", "--verbose"):
+            verbose = True
+        else:
+            assert False, "unhandled option"
 
     ## Verbose mode
-	if verbose:
-		print("## mergeBAM.py")
-		print("## forward=", R1file)
-		print("## reverse=", R2file)
-		print("## output=", output)
-		print("## min mapq=", mapq)
-		print("## report_single=", report_single)
-		print("## report_multi=", report_multi)
-		print("## verbose=", verbose)
+    if verbose:
+        print("## mergeSAM.py")
+        print("## forward=", R1file)
+        print("## reverse=", R2file)
+        print("## output=", output)
+        print("## min mapq=", mapq)
+        print("## report_single=", report_single)
+        print("## report_multi=", report_multi)
+        print("## verbose=", verbose)
 
     ## Initialize variables
-	tot_pairs_counter = 0
-	multi_pairs_counter = 0
-	uniq_pairs_counter = 0
-	unmapped_pairs_counter = 0 
-	lowq_pairs_counter = 0
-	multi_singles_counter = 0
-	uniq_singles_counter = 0
-	lowq_singles_counter = 0
+    tot_pairs_counter = 0
+    multi_pairs_counter = 0
+    uniq_pairs_counter = 0
+    unmapped_pairs_counter = 0 
+    lowq_pairs_counter = 0
+    multi_singles_counter = 0
+    uniq_singles_counter = 0
+    lowq_singles_counter = 0
 
     #local_counter = 0
-	paired_reads_counter = 0
-	singleton_counter = 0
-	reads_counter = 0
-	r1 = None
-	r2 = None
+    paired_reads_counter = 0
+    singleton_counter = 0
+    reads_counter = 0
+    r1 = None
+    r2 = None
 
     ## Reads are 0-based too (for both SAM and BAM format)
     ## Loop on all reads
-	if verbose:
-		print("## Merging forward and reverse tags ...")
-	with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2: 
-		if output == "-":
-			outfile = pysam.AlignmentFile(output, "w", template=hr1)
-		else:
-			outfile = pysam.AlignmentFile(output, "wb", template=hr1)
-		for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
-			reads_counter +=1
-
-            #print r1
-            #print r2
-            #print hr1.getrname(r1.tid)
-            #print hr2.getrname(r2.tid)
-
-			if (reads_counter % 1000000 == 0 and verbose):
-				print("##", reads_counter)
+    if verbose:
+        print("## Merging forward and reverse tags ...")
+    
+    with pysam.Samfile(R1file, "rb") as hr1, pysam.Samfile(R2file, "rb") as hr2: 
+        if output == "-":
+            outfile = pysam.AlignmentFile(output, "w", template=hr1)
+        else:
+            outfile = pysam.AlignmentFile(output, "wb", template=hr1)
+
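+        ## Mates are paired by file position: both BAM files must contain the same reads in the same order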
+        for r1, r2 in zip(hr1.fetch(until_eof=True), hr2.fetch(until_eof=True)):
+            reads_counter +=1
+            if (reads_counter % 1000000 == 0 and verbose):
+                print("##", reads_counter)
                 
-			if get_read_name(r1) == get_read_name(r2):
+            if get_read_name(r1) == get_read_name(r2):
+                ## both unmapped
+                if r1.is_unmapped == True and r2.is_unmapped == True:
+                    unmapped_pairs_counter += 1
+                    continue
                     
-                 ## both unmapped
-				if r1.is_unmapped == True and r2.is_unmapped == True:
-					unmapped_pairs_counter += 1
-					continue
-
                 ## both mapped
-				elif r1.is_unmapped == False and r2.is_unmapped == False:
-                     ## quality
-					if mapq != None and (r1.mapping_quality < int(mapq) or r2.mapping_quality < int(mapq)):
-						lowq_pairs_counter += 1
-						continue
+                elif r1.is_unmapped == False and r2.is_unmapped == False:
+                    ## quality
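+                    ## Discard the pair only when BOTH mates are below the min_mapq threshold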
+                    if mapq != None and (r1.mapping_quality < int(mapq) and r2.mapping_quality < int(mapq)):
+                        lowq_pairs_counter += 1
+                        continue
                  
-                     ## Unique mapping
-					if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
-						uniq_pairs_counter += 1
-					else:
-						multi_pairs_counter += 1
-						if report_multi == False:
-							continue
-		# one end mapped, other is not
-				else:
-					singleton_counter += 1
-					if report_single == False:
-						continue
-					if r1.is_unmapped == False:  ## first end is mapped, second is not
-                         ## quality
-						if mapq != None and (r1.mapping_quality < int(mapq)): 
-							lowq_singles_counter += 1
-							continue
-                         ## Unique mapping
-						if is_unique_bowtie2(r1) == True:
-							uniq_singles_counter += 1
-						else:
-							multi_singles_counter += 1
-							if report_multi == False:
-								continue
-					else:  ## second end is mapped, first is not
-                         ## quality
-						if mapq != None and (r2.mapping_quality < int(mapq)): 
-							lowq_singles_counter += 1
-							continue
-                         ## Unique mapping
-						if is_unique_bowtie2(r2) == True:
-							uniq_singles_counter += 1
-						else:
-							multi_singles_counter += 1
-							if report_multi == False:
-								continue
+                    ## Unique mapping
+                    if is_unique_bowtie2(r1) == True and is_unique_bowtie2(r2) == True:
+                        uniq_pairs_counter += 1
+                    else:
+                        multi_pairs_counter += 1
+                        if report_multi == False:
+                            continue
+
+                ## One mate mapped
+                else:
+                    singleton_counter += 1
+                    if report_single == False:
+                        continue
+                    if r1.is_unmapped == False:  ## first end is mapped, second is not
+                        ## quality
+                        if mapq != None and (r1.mapping_quality < int(mapq)): 
+                            lowq_singles_counter += 1
+                            continue
+                        ## Unique mapping
+                        if is_unique_bowtie2(r1) == True:
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if report_multi == False:
+                                continue
+                    else:  ## second end is mapped, first is not
+                        ## quality
+                        if mapq != None and (r2.mapping_quality < int(mapq)): 
+                            lowq_singles_counter += 1
+                            continue
+                        ## Unique mapping
+                        if is_unique_bowtie2(r2) == True:
+                            uniq_singles_counter += 1
+                        else:
+                            multi_singles_counter += 1
+                            if report_multi == False:
+                                continue
+
+                tot_pairs_counter += 1          
+                (r1, r2) = sam_flag(r1,r2, hr1, hr2)
 
-				tot_pairs_counter += 1          
-				(r1, r2) = sam_flag(r1,r2, hr1, hr2)
-
-                #print hr1.getrname(r1.tid)
-                #print hr2.getrname(r2.tid)
-                #print r1
-                #print r2
                 ## Write output
-				outfile.write(r1)
-				outfile.write(r2)
-
-			else:
-				print("Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted.")
-				sys.exit(1)
-
-	if stat:
-		if output == '-':
-			statfile = "pairing.stat"
-		else:
-			statfile = re.sub('\.bam$', '.pairstat', output)
-		with open(statfile, 'w') as handle_stat:
-			handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")  
-			handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
-			handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
-	hr1.close()
-	hr2.close()
-	outfile.close()
+                outfile.write(r1)
+                outfile.write(r2)
+                
+            else:
+                print("Forward and reverse reads not paired. Check that BAM files have the same read names and are sorted.")
+                sys.exit(1)
+
+        if stat:
+            if output == '-':
+                statfile = "pairing.stat"
+            else:
+                statfile = re.sub(r'\.bam$', '.pairstat', output)
+            with open(statfile, 'w') as handle_stat:
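+                ## Each line: metric name <tab> count <tab> percentage of all processed read pairs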
+                handle_stat.write("Total_pairs_processed\t" + str(reads_counter) + "\t" + str(round(float(reads_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Unmapped_pairs\t" + str(unmapped_pairs_counter) + "\t" + str(round(float(unmapped_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Low_qual_pairs\t" + str(lowq_pairs_counter) + "\t" + str(round(float(lowq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Unique_paired_alignments\t" + str(uniq_pairs_counter) + "\t" + str(round(float(uniq_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Multiple_pairs_alignments\t" + str(multi_pairs_counter) + "\t" + str(round(float(multi_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Pairs_with_singleton\t" + str(singleton_counter) + "\t" + str(round(float(singleton_counter)/float(reads_counter)*100,3)) + "\n")  
+                handle_stat.write("Low_qual_singleton\t" + str(lowq_singles_counter) + "\t" + str(round(float(lowq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Unique_singleton_alignments\t" + str(uniq_singles_counter) + "\t" + str(round(float(uniq_singles_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Multiple_singleton_alignments\t" + str(multi_singles_counter) + "\t" + str(round(float(multi_singles_counter)/float(reads_counter)*100,3)) + "\n")
+                handle_stat.write("Reported_pairs\t" + str(tot_pairs_counter) + "\t" + str(round(float(tot_pairs_counter)/float(reads_counter)*100,3)) + "\n")
+    hr1.close()
+    hr2.close()
+    outfile.close()
 
diff --git a/conf/hicpro.config b/conf/hicpro.config
deleted file mode 100644
index cd0cf0b5a54f860312f49ac193802d53964ce686..0000000000000000000000000000000000000000
--- a/conf/hicpro.config
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * -------------------------------------------------
- *  Nextflow config file for Genomes paths
- * -------------------------------------------------
- * Defines reference genomes
- * Can be used by any config that customises the base
- * path using $params.genomes_base / --genomes_base
- */
-
-params {
-
-       // Alignment options
-       bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
-       bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
-       min_mapq = 10
-
-       // Digestion Hi-C
-       restriction_site = 'A^AGCTT'
-       ligation_site = 'AAGCTAGCTT'
-       min_restriction_fragment_size = 
-       max_restriction_fragment_size = 
-       min_insert_size = 
-       max_insert_size =
-
-       // Hi-C Processing
-       min_cis_dist = 
-       rm_singleton = true
-       rm_multi = true
-       rm_dup = true
-
-       bin_size = '1000000,500000'
-
-       ice_max_iter = 100
-       ice_filer_low_count_perc = 0.02
-       ice_filer_high_count_perc =  0
-       ice_eps = 0.1
-}
-
diff --git a/conf/test.config b/conf/test.config
index 37d07472584abe385353731bd7c741490892f049..5988a32428569c24f4f4d321ee727f617d3da202 100644
--- a/conf/test.config
+++ b/conf/test.config
@@ -24,9 +24,12 @@ params {
   // Annotations
   fasta = 'https://github.com/nf-core/test-datasets/raw/hic/reference/W303_SGD_2015_JRIU00000000.fsa'
   digestion = 'hindiii'
-  min_mapq = 20
+  min_mapq = 10
   min_restriction_fragment_size = 100
   max_restriction_fragment_size = 100000
   min_insert_size = 100
   max_insert_size = 600
+
+  // HiCExplorer does not run on the test data, so skip the distance decay QC
+  skip_dist_decay = true
 }
diff --git a/main.nf b/main.nf
index 24f9138e0fc770b6211553f142817328608a7714..4c03cdfc3443ed6371be59e549928c4811cf641e 100644
--- a/main.nf
+++ b/main.nf
@@ -851,7 +851,7 @@ process run_ice{
  * Cooler
  */
 
-process cooler_build {
+process convert_to_pairs {
    tag "$sample"
    label 'process_medium'
 
@@ -863,18 +863,17 @@ process cooler_build {
    file chrsize from chrsize_build.collect()
 
    output:
-   set val(sample), file("contacts.sorted.txt.gz"), file("contacts.sorted.txt.gz.px2") into cool_build, cool_build_zoom
+   set val(sample), file("*.txt.gz") into cool_build, cool_build_zoom
 
    script:
    """
-   awk '{OFS="\t";print \$2,\$3,\$4,\$5,\$6,\$7,1}' $vpairs | sed -e 's/+/1/g' -e 's/-/16/g' > contacts.txt
-   cooler csort --nproc ${task.cpus} -c1 1 -p1 2 -s1 3 -c2 4 -p2 5 -s2 6 \
-	  contacts.txt \
-          -o contacts.sorted.txt.gz \
-	  ${chrsize}
+   ## Keep chr1/pos1/strand1/chr2/pos2/strand2 from the valid pairs and encode strands as 1/16
+   awk '{OFS="\t";print \$2,\$3,\$4,\$5,\$6,\$7}' $vpairs | sed -e 's/+/1/g' -e 's/-/16/g' > contacts.txt
+   gzip contacts.txt
    """
 }
 
+
 process cooler_raw {
   tag "$sample - ${res}"
   label 'process_medium'
@@ -883,7 +882,7 @@ process cooler_raw {
               saveAs: {filename -> filename.indexOf(".cool") > 0 ? "raw/cool/$filename" : "raw/txt/$filename"}
 
   input:
-  set val(sample), file(contacts), file(index), val(res) from cool_build.combine(map_res_cool)
+  set val(sample), file(contacts), val(res) from cool_build.combine(map_res_cool)
   file chrsize from chrsize_raw.collect()
 
   output:
@@ -893,7 +892,7 @@ process cooler_raw {
   script:
   """
   cooler makebins ${chrsize} ${res} > ${sample}_${res}.bed
-  cooler cload pairix --nproc ${task.cpus} ${sample}_${res}.bed ${contacts} ${sample}_${res}.cool
+  cooler cload pairs -c1 1 -p1 2 -c2 4 -p2 5 ${sample}_${res}.bed ${contacts} ${sample}_${res}.cool
   cooler dump ${sample}_${res}.cool | awk '{OFS="\t"; print \$1+1,\$2+1,\$3}' > ${sample}_${res}.txt
   """
 }
@@ -933,7 +932,7 @@ process cooler_zoomify {
    !params.skip_mcool
 
    input:
-   set val(sample), file(contacts), file(index) from cool_build_zoom
+   set val(sample), file(contacts) from cool_build_zoom
    file chrsize from chrsize_zoom.collect()
 
    output:
@@ -942,7 +941,7 @@ process cooler_zoomify {
    script:
    """
    cooler makebins ${chrsize} ${params.res_zoomify} > bins.bed
-   cooler cload pairix --nproc ${task.cpus} bins.bed contacts.sorted.txt.gz ${sample}.cool
+   cooler cload pairs -c1 1 -p1 2 -c2 4 -p2 5 bins.bed ${contacts} ${sample}.cool
    cooler zoomify --nproc ${task.cpus} --balance ${sample}.cool
    """
 }
@@ -966,7 +965,7 @@ process convert_to_h5 {
   script:
   """
   hicConvertFormat --matrices ${maps} \
-  		   --outFileName ${sample}.h5 \
+  		   --outFileName ${maps.baseName}.h5 \
 		   --resolution ${res} \
 		   --inputFormat cool \
 		   --outputFormat h5 \
@@ -1001,11 +1000,10 @@ process dist_decay {
 
 
   script:
-  prefix = h5mat.toString() - ~/(\.h5)?$/
   """
   hicPlotDistVsCounts --matrices ${h5mat} \
-                      --plotFile ${prefix}_distcount.png \
-  		      --outFileData ${prefix}_distcount.txt
+                      --plotFile ${h5mat.baseName}_distcount.png \
+  		      --outFileData ${h5mat.baseName}_distcount.txt
   """
 }
 
@@ -1106,7 +1104,7 @@ process multiqc {
    file (mqc_custom_config) from ch_multiqc_custom_config.collect().ifEmpty([])
    file ('input_*/*') from all_mstats.concat(all_mergestat).collect()
    file ('software_versions/*') from software_versions_yaml
-   file workflow_summary from create_workflow_summary(summary)
+   file workflow_summary from ch_workflow_summary.collect()
 
    output:
    file "*multiqc_report.html" into multiqc_report
diff --git a/nextflow.config b/nextflow.config
index 4ef6edc0aa2a9e0fdb9fd170fde4a84c3a004142..782a1c1b0d5a69e80faf2a6fe8185d50f36295b2 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -17,8 +17,31 @@ params {
   restriction_fragments = false
   save_reference = false
  
+  // Dnase Hi-C
+  dnase = false
+  min_cis_dist = 0
+
+  // Mapping
+  split_fastq = false
+  fastq_chunks_size = 20000000
+  save_interaction_bam = false
+  save_aligned_intermediates = false
+  bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+  bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
+  keep_dups = false
+  keep_multi = false
+  min_mapq = 10
+
+
   // Digestion Hi-C
-  digestion = false
+  digestion = 'hindiii'
+  restriction_site = ''
+  ligation_site = ''
+  min_restriction_fragment_size = 0
+  max_restriction_fragment_size = 0
+  min_insert_size = 0
+  max_insert_size = 0
+
   digest {
     'hindiii'{
        restriction_site='A^AGCTT'
@@ -37,26 +60,8 @@ params {
        ligation_site='GATCGATC,GATCGANT,GANTGATC,GANTGANT'
     }
   }
-  min_restriction_fragment_size = 0
-  max_restriction_fragment_size = 0
-  min_insert_size = 0
-  max_insert_size = 0
 
-  // Dnase Hi-C
-  dnase = false
-  min_cis_dist = 0
 
-  // Mapping
-  split_fastq = false
-  fastq_chunks_size = 20000000
-  save_interaction_bam = false
-  save_aligned_intermediates = false
-  bwt2_opts_end2end = '--very-sensitive -L 30 --score-min L,-0.6,-0.2 --end-to-end --reorder'
-  bwt2_opts_trimmed = '--very-sensitive -L 20 --score-min L,-0.6,-0.2 --end-to-end --reorder'
-  keep_dups = false
-  keep_multi = false
-  min_mapq = 10
-
   // Contact maps
   bin_size = '1000000,500000'
   ice_max_iter = 100
@@ -66,8 +74,8 @@ params {
 
   // Downstream Analysis
   res_dist_decay = '1000000'
-  tads_caller = "hicexplorer,insulation"
-  res_tads = '40000,20000'
+  tads_caller = 'insulation'
+  res_tads = '40000'
   res_zoomify = '5000'
 
   // Workflow