diff --git a/R/precision_recall.R b/R/precision_recall.R
index b61bccd3f1ff90c5485d95f0f614505ad452398b..920b403baabd3322270132652c2a3a0ba1a07fd7 100644
--- a/R/precision_recall.R
+++ b/R/precision_recall.R
@@ -71,17 +71,17 @@ get_pr_object <- function(evaldata_params, col_param = "description", col_truth
   pr_auc_params <- join_dtf(pr_auc_params, random_classifier_auc_params , 
                             k1 = col_param, k2 = col_param)
   names(pr_auc_params)[ names(pr_auc_params) == "random_classifier_auc_params" ] <- "pr_randm_AUC"
-
-
-
+  ## -- performance ratio: observed AUC over the random classifier AUC
+  pr_auc_params$pr_performance_ratio <- pr_auc_params$pr_AUC / pr_auc_params$pr_randm_AUC
 
   ## -- aggregate
   pr_curve_agg <- dt_evaldata_params[, compute_pr_curve(.SD), by = "from", .SDcols=c(col_truth, col_score)]
   pr_auc_agg <- pr_curve_agg[, compute_pr_auc(.SD), by = "from", .SDcols=c("recall", "precision")]
   names(pr_auc_agg)[ names(pr_auc_agg) == "V1" ] <- "pr_AUC"
   pr_auc_agg$pr_randm_AUC <- random_classifier_auc_agg
-  
-  
+  ## -- performance ratio for the aggregated curve
+  pr_auc_agg$pr_performance_ratio <- pr_auc_agg$pr_AUC / pr_auc_agg$pr_randm_AUC
+
   return(list(byparams = list(pr_curve = as.data.frame(pr_curve_params),
                               pr_auc = as.data.frame(pr_auc_params)),
               aggregate = list(pr_curve = as.data.frame(pr_curve_agg),
diff --git a/R/simulation_report.R b/R/simulation_report.R
index 6955f61e9d83d3123f6c26b7ceedcd6d06d742b7..6717854edf9d9681eff1dacc74661b1f8a1b9a44 100644
--- a/R/simulation_report.R
+++ b/R/simulation_report.R
@@ -142,7 +142,8 @@ evaluation_report <- function(list_tmb, dds, mock_obj, coeff_threshold, alt_hypo
   ## -- acc, recall, sensib, speci, ...
   metrics_obj <- get_ml_metrics_obj(eval_data2metrics, alpha_risk )
   ## -- merge all metrics in one obj
-  model_perf_obj <- get_performances_metrics_obj( r2_params = params_identity_eval$R2,                                dispersion_identity_eval$R2,
+  model_perf_obj <- get_performances_metrics_obj( r2_params = params_identity_eval$R2,
+                                                  dispersion_identity_eval$R2,
                                                   pr_curve_obj,
                                                   roc_curve_obj,
                                                   metrics_obj )
diff --git a/dev/flat_full.Rmd b/dev/flat_full.Rmd
index 1a95b4d62bbc1918396fb86a3b28a6374ff48d76..32fbba8ecdc446281c21728a0a810d91076fddc0 100644
--- a/dev/flat_full.Rmd
+++ b/dev/flat_full.Rmd
@@ -6833,17 +6833,17 @@ get_pr_object <- function(evaldata_params, col_param = "description", col_truth
   pr_auc_params <- join_dtf(pr_auc_params, random_classifier_auc_params , 
                             k1 = col_param, k2 = col_param)
   names(pr_auc_params)[ names(pr_auc_params) == "random_classifier_auc_params" ] <- "pr_randm_AUC"
-
-
-
+  ## -- performance ratio: observed AUC over the random classifier AUC
+  pr_auc_params$pr_performance_ratio <- pr_auc_params$pr_AUC / pr_auc_params$pr_randm_AUC
 
   ## -- aggregate
   pr_curve_agg <- dt_evaldata_params[, compute_pr_curve(.SD), by = "from", .SDcols=c(col_truth, col_score)]
   pr_auc_agg <- pr_curve_agg[, compute_pr_auc(.SD), by = "from", .SDcols=c("recall", "precision")]
   names(pr_auc_agg)[ names(pr_auc_agg) == "V1" ] <- "pr_AUC"
   pr_auc_agg$pr_randm_AUC <- random_classifier_auc_agg
-  
-  
+  ## -- performance ratio for the aggregated curve
+  pr_auc_agg$pr_performance_ratio <- pr_auc_agg$pr_AUC / pr_auc_agg$pr_randm_AUC
+
   return(list(byparams = list(pr_curve = as.data.frame(pr_curve_params),
                               pr_auc = as.data.frame(pr_auc_params)),
               aggregate = list(pr_curve = as.data.frame(pr_curve_agg),
@@ -8125,7 +8125,8 @@ evaluation_report <- function(list_tmb, dds, mock_obj, coeff_threshold, alt_hypo
   ## -- acc, recall, sensib, speci, ...
   metrics_obj <- get_ml_metrics_obj(eval_data2metrics, alpha_risk )
   ## -- merge all metrics in one obj
-  model_perf_obj <- get_performances_metrics_obj( r2_params = params_identity_eval$R2,                                dispersion_identity_eval$R2,
+  model_perf_obj <- get_performances_metrics_obj( r2_params = params_identity_eval$R2,
+                                                  dispersion_identity_eval$R2,
                                                   pr_curve_obj,
                                                   roc_curve_obj,
                                                   metrics_obj )