Verified Commit e0268aca authored by Laurent Modolo

Merge branch 'dev'

parents 90f74bd7 9422d648
@@ -42,6 +42,7 @@ src/install_nextflow.sh
## Running a toy RNASeq quantification pipeline
To run the tests, we first need to get a training set:
```sh
cd data
git clone https://gitbio.ens-lyon.fr/LBMC/Hub/tiny_dataset.git
```
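A quick sanity check that the clone worked (the three sub-folders listed below are the ones used by the commands later in this README):

```sh
# still inside data/
ls tiny_dataset/fastq tiny_dataset/fasta tiny_dataset/annot
```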
@@ -63,16 +64,24 @@
By default, the `src/nextflow.config` file defines 4 different profiles:
- `-profile docker` each process of the pipeline will be executed within a `docker` container locally
- `-profile singularity` each process of the pipeline will be executed within a `singularity` container locally
- `-profile psmn` each process will be sent as a separate job within a `charliecloud` container on the PSMN
- `-profile ccin2p3` each process will be sent as a separate job within a `singularity` container on the CCIN2P3
If the containers are not found locally, they are automatically downloaded before running the process. For the PSMN and the CCIN2P3, the container images are downloaded into a shared folder (`/scratch/Bio/charliecloud` for the PSMN, and `/sps/lbmc/common/singularity/` for the CCIN2P3).
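For example, to run the toy pipeline locally with the `docker` profile, you can reuse the command shown in the PSMN section below and only change the `-profile` value:

```sh
./nextflow src/solution_RNASeq.nf --fastq "data/tiny_dataset/fastq/tiny2_R{1,2}.fastq.gz" --fasta "data/tiny_dataset/fasta/tiny_v2_10.fasta" --bed "data/tiny_dataset/annot/tiny.bed" -profile docker
```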
### PSMN
To have access to `charliecloud` on the PSMN, you need to add the following path to your `PATH` variable:
```sh
PATH=/Xnfs/abc/charliecloud_bin/:$PATH
```
You can add this line to your `~/.bashrc` or `~/.zshrc` file.
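For example, you can append the line to your `~/.bashrc` and check that the `charliecloud` tools are now found (the `ch-run` binary name is an assumption about what the shared install provides):

```sh
echo 'PATH=/Xnfs/abc/charliecloud_bin/:$PATH' >> ~/.bashrc
source ~/.bashrc
command -v ch-run  # assumed binary name; should resolve to a path under /Xnfs/abc/charliecloud_bin/
```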
When running `nextflow` on the PSMN, we recommend using `tmux` before launching the pipeline:
```sh
tmux
./nextflow src/solution_RNASeq.nf --fastq "data/tiny_dataset/fastq/tiny2_R{1,2}.fastq.gz" --fasta "data/tiny_dataset/fasta/tiny_v2_10.fasta" --bed "data/tiny_dataset/annot/tiny.bed" -profile psmn
```
@@ -83,10 +92,10 @@
You can re-attach the `tmux` session with the command `tmux a` (and press `ctrl + b` then `d` to detach it).
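A minimal detach/re-attach cycle, assuming the default `tmux` prefix key (`ctrl + b`):

```sh
# inside the tmux session running nextflow: press ctrl + b then d to detach
tmux ls  # list running sessions
tmux a   # re-attach to the most recent session
```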
### CCIN2P3
When running `nextflow` on the CCIN2P3, you cannot use `tmux`; instead, you should send a _daemon_ job which will launch the `nextflow` command.
You can edit the `src/ccin2p3.pbs` file to customize your `nextflow` command and submit it with:
```sh
qsub src/ccin2p3.pbs
```
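You can then check that the daemon job is running with the scheduler client (a sketch, assuming the qsub-style scheduler implied by the `.pbs` script also provides `qstat`):

```sh
qstat -u $USER  # the nextflow daemon job should be listed while the pipeline runs
```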
src/nextflow.config
@@ -36,7 +36,8 @@ profiles {
    }
  }
  podman {
    charliecloud.enabled = true
    charliecloud.cacheDir = "./bin/"
    process {
      errorStrategy = 'finish'
      memory = '16GB'
@@ -79,47 +80,39 @@ profiles {
    }
  }
  psmn {
    charliecloud.enabled = true
    charliecloud.cacheDir = "/Xnfs/abc/charliecloud"
    charliecloud.runOptions = "--bind /scratch:/scratch --bind /Xnfs:/Xnfs"
    process {
      errorStrategy = { sleep(Math.pow(2, task.attempt) * 200 as long); return 'retry' }
      maxRetries = 3
      withLabel: big_mem_mono_cpus {
        executor = "slurm"
        cpus = 1
        memory = "128GB"
        time = "24h"
        clusterOptions = "--partition=Lake"
      }
      withLabel: big_mem_multi_cpus {
        executor = "slurm"
        cpus = 32
        memory = "192GB"
        time = "24h"
        clusterOptions = "--partition=Lake"
      }
      withLabel: small_mem_mono_cpus {
        executor = "slurm"
        cpus = 1
        memory = "16GB"
        time = "24h"
        clusterOptions = "--partition=Lake"
      }
      withLabel: small_mem_multi_cpus {
        executor = "slurm"
        cpus = 32
        memory = "16GB"
        time = "24h"
        clusterOptions = "--partition=Lake"
      }
    }
  }
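With the `psmn` profile above, each labelled process is submitted to Slurm on the `Lake` partition; as a sketch (assuming the Slurm client tools are available on the PSMN login node), a run started with `-profile psmn` can be monitored with:

```sh
squeue -u $USER -p Lake  # one line per Nextflow process currently queued or running
```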