diff --git a/doc/getting_started.md b/doc/getting_started.md
index b1a01f13775b6e9c49726c713e53878189758040..e41ffe528fd9745322fec917e00ae407f1a5f389 100644
--- a/doc/getting_started.md
+++ b/doc/getting_started.md
@@ -42,6 +42,7 @@ src/install_nextflow.sh
 ## Running a toy RNASeq quantification pipeline
 
 To run tests we first need to get a training set
+
 ```sh
 cd data
 git clone https://gitbio.ens-lyon.fr/LBMC/Hub/tiny_dataset.git
@@ -63,16 +64,24 @@ By default le `src/nextflow.config` file define 4 different profiles
 
 - `-profile docker` each process of the pipeline will be executed within a `docker` container locally
 - `-profile singularity` each process of the pipeline will be executed within a `singularity` container locally
-- `-profile psmn` each process will be sent as a separate job within a `singularity` container on the PSMN
+- `-profile psmn` each process will be sent as a separate job within a `charliecloud` container on the PSMN
 - `-profile ccin2p3` each process will be sent as a separate job within a `singularity` container on the CCIN2P3
 
-If the containers are not found locally, they are automatically downloaded before running the process. For the PSMN and CCIN2P3, the `singularity` images are downloaded in a shared folder (`/scratch/Bio/singularity` for the PSMN, and `/sps/lbmc/common/singularity/` for the CCIN2P3)
-
+If the containers are not found locally, they are automatically downloaded before running the process. For the PSMN and the CCIN2P3, the container images are downloaded to a shared folder (`/scratch/Bio/charliecloud` for the PSMN, and `/sps/lbmc/common/singularity/` for the CCIN2P3).
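+
+For example, to execute every process of the pipeline in local Docker containers instead of submitting to a cluster (a sketch, assuming Docker is installed and the daemon is running):
+
+```sh
+./nextflow src/solution_RNASeq.nf --fastq "data/tiny_dataset/fastq/tiny2_R{1,2}.fastq.gz" --fasta "data/tiny_dataset/fasta/tiny_v2_10.fasta" --bed "data/tiny_dataset/annot/tiny.bed" -profile docker
+```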
 
 ### PSMN
 
+To have access to `charliecloud` on the PSMN, you need to add the following path to your `PATH` variable:
+
+```sh
+PATH=/Xnfs/abc/charliecloud_bin/:$PATH
+```
+
+You can add this line to your `~/.bashrc` or `~/.zshrc` file.
+
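+For example, if you use `bash`, one way to make this permanent (a minimal sketch, assuming you keep your shell configuration in `~/.bashrc`) is:
+
+```sh
+echo 'export PATH=/Xnfs/abc/charliecloud_bin/:$PATH' >> ~/.bashrc
+source ~/.bashrc
+```
+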
 When running `nextflow` on the PSMN, we recommend to use `tmux` before launching the pipeline:
 
 ```sh
 tmux
 ./nextflow src/solution_RNASeq.nf --fastq "data/tiny_dataset/fastq/tiny2_R{1,2}.fastq.gz" --fasta "data/tiny_dataset/fasta/tiny_v2_10.fasta" --bed "data/tiny_dataset/annot/tiny.bed" -profile psmn
@@ -83,10 +92,10 @@ You can re-attach the `tmux` session, with the command `tmux a` (and press `ctrl
 
 ### CCIN2P3
 
-When runnning `nextflow` on the CCIN2P3, you cannot use `tmux`, instead you should send a *daemon* jobs which will launch the `nextflow` command.
+When running `nextflow` on the CCIN2P3, you cannot use `tmux`; instead, you should submit a _daemon_ job which will launch the `nextflow` command.
 You can edit the `src/ccin2p3.pbs` file to personalize your `nextflow` command and send it with the command:
 
 ```sh
 qsub src/ccin2p3.pbs
 ```
-
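+
+As a rough sketch, such a script could look like the following (hypothetical content shown here; the actual `src/ccin2p3.pbs` shipped in the repository may differ):
+
+```sh
+#!/bin/bash
+# Move to the directory the job was submitted from
+# ($PBS_O_WORKDIR is set by PBS-style schedulers; adapt if the CCIN2P3 scheduler differs)
+cd "$PBS_O_WORKDIR"
+# Launch the pipeline; nextflow then submits each process as a separate job
+./nextflow src/solution_RNASeq.nf --fastq "data/tiny_dataset/fastq/tiny2_R{1,2}.fastq.gz" --fasta "data/tiny_dataset/fasta/tiny_v2_10.fasta" --bed "data/tiny_dataset/annot/tiny.bed" -profile ccin2p3
+```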
diff --git a/src/nextflow.config b/src/nextflow.config
index f848cda055c4130869c21bc08eb15fa05ce5dbf7..02701889fec6c0910293425418e4c3e9c3cf7436 100644
--- a/src/nextflow.config
+++ b/src/nextflow.config
@@ -36,7 +36,8 @@ profiles {
     }
   }
   podman {
-    podman.enabled = true
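+    // run each process in a charliecloud container, caching pulled images under ./bin/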
+    charliecloud.enabled = true
+    charliecloud.cacheDir = "./bin/"
     process {
       errorStrategy = 'finish'
       memory = '16GB'
@@ -79,47 +80,39 @@ profiles {
     }
   }
   psmn {
-    singularity.enabled = true
-    singularity.cacheDir = "/Xnfs/abc/singularity/"
-    singularity.runOptions = "--bind /Xnfs,/scratch"
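+    // use charliecloud containers cached on the shared /Xnfs filesystem,
+    // with /scratch and /Xnfs bind-mounted inside each container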
+    charliecloud.enabled = true
+    charliecloud.cacheDir = "/Xnfs/abc/charliecloud"
+    charliecloud.runOptions = "--bind /scratch:/scratch --bind /Xnfs:/Xnfs"
     process{
       errorStrategy = { sleep(Math.pow(2, task.attempt) * 200 as long); return 'retry' }
       maxRetries = 3
       withLabel: big_mem_mono_cpus {
-        executor = "sge"
-        clusterOptions = "-cwd -V"
+        executor = "slurm"
         cpus = 1
         memory = "128GB"
-        time = "12h"
-        queue = "monointeldeb128,monointeldeb192"
+        time = "24h"
+        clusterOptions = "--partition=Lake"
       }
       withLabel: big_mem_multi_cpus {
-        executor = "sge"
-        clusterOptions = "-cwd -V"
+        executor = "slurm"
         cpus = 32
         memory = "192GB"
         time = "24h"
-        queue = "CLG*,SLG*,Epyc*"
-        penv = "openmp32"
-
+        clusterOptions = "--partition=Lake"
       }
       withLabel: small_mem_mono_cpus {
-        executor = "sge"
-        clusterOptions = "-cwd -V"
+        executor = "slurm"
         cpus = 1
         memory = "16GB"
-        time = "12h"
-        queue = "monointeldeb128,monointeldeb192"
+        time = "24h"
+        clusterOptions = "--partition=Lake"
       }
       withLabel: small_mem_multi_cpus {
-        executor = "sge"
-        clusterOptions = "-cwd -V"
+        executor = "slurm"
         cpus = 32
         memory = "16GB"
         time = "24h"
-        queue = "CLG*,SLG*,Epyc*"
-        penv = "openmp32"
-
+        clusterOptions = "--partition=Lake"
       }
     }
   }