From b72cf51a12ec5489f62675d540007321f8b619a5 Mon Sep 17 00:00:00 2001 From: Daniel Date: Sun, 24 Nov 2024 17:28:10 +0100 Subject: [PATCH] docs --- R/performance_roc.R | 17 +++++++++++++++-- man/performance_roc.Rd | 18 ++++++++++++++++-- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/R/performance_roc.R b/R/performance_roc.R index 3b02d7b61..5cdd8d17f 100644 --- a/R/performance_roc.R +++ b/R/performance_roc.R @@ -1,8 +1,21 @@ #' @title Simple ROC curve #' @name performance_roc #' -#' @description This function calculates a simple ROC curves of x/y coordinates -#' based on response and predictions of a binomial model. +#' @description +#' This function calculates a simple ROC curve of x/y coordinates based on +#' response and predictions of a binomial model. +#' +#' It returns the area under the curve (AUC) as a percentage, which corresponds +#' to the probability that a randomly chosen observation of "condition 1" is +#' correctly classified by the model as having a higher probability of being +#' "condition 1" than a randomly chosen "condition 2" observation. +#' +#' Applying `as.data.frame()` to the output returns a data frame containing the +#' following: +#' - `Specificity` (that actually corresponds to `1 - Specificity`): It is the +#' False Positive Rate. +#' - `Sensitivity`: It is the True Positive Rate, which is the proportion of +#' correctly classified "condition 1" observations. #' #' @param x A numeric vector, representing the outcome (0/1), or a model with #' binomial outcome. diff --git a/man/performance_roc.Rd b/man/performance_roc.Rd index e14ef04c9..1e116af70 100644 --- a/man/performance_roc.Rd +++ b/man/performance_roc.Rd @@ -26,8 +26,22 @@ curve (\code{Sensitivity} and \code{Specificity}), and a column with the model name. } \description{ -This function calculates a simple ROC curves of x/y coordinates -based on response and predictions of a binomial model. 
+This function calculates a simple ROC curve of x/y coordinates based on +response and predictions of a binomial model. + +It returns the area under the curve (AUC) as a percentage, which corresponds +to the probability that a randomly chosen observation of "condition 1" is +correctly classified by the model as having a higher probability of being +"condition 1" than a randomly chosen "condition 2" observation. + +Applying \code{as.data.frame()} to the output returns a data frame containing the +following: +\itemize{ +\item \code{Specificity} (that actually corresponds to \code{1 - Specificity}): It is the +False Positive Rate. +\item \code{Sensitivity}: It is the True Positive Rate, which is the proportion of +correctly classified "condition 1" observations. +} } \note{ There is also a \href{https://easystats.github.io/see/articles/performance.html}{\code{plot()}-method}