diff --git a/NAMESPACE b/NAMESPACE index c41ef7009..35c1eb7c0 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -5,14 +5,14 @@ S3method(print,scoringutils_check) S3method(quantile_to_interval,data.frame) S3method(quantile_to_interval,numeric) S3method(score,default) -S3method(score,scoringutils_binary) -S3method(score,scoringutils_point) -S3method(score,scoringutils_quantile) -S3method(score,scoringutils_sample) -S3method(validate_forecast,scoringutils_binary) -S3method(validate_forecast,scoringutils_point) -S3method(validate_forecast,scoringutils_quantile) -S3method(validate_forecast,scoringutils_sample) +S3method(score,forecast_binary) +S3method(score,forecast_point) +S3method(score,forecast_quantile) +S3method(score,forecast_sample) +S3method(validate_forecast,forecast_binary) +S3method(validate_forecast,forecast_point) +S3method(validate_forecast,forecast_quantile) +S3method(validate_forecast,forecast_sample) export(abs_error) export(add_coverage) export(add_pairwise_comparison) @@ -41,7 +41,7 @@ export(mad_sample) export(make_NA) export(make_na) export(merge_pred_and_obs) -export(new_scoringutils) +export(new_forecast) export(overprediction) export(pairwise_comparison) export(pit) diff --git a/NEWS.md b/NEWS.md index b7fd1d529..e77c172c7 100644 --- a/NEWS.md +++ b/NEWS.md @@ -6,6 +6,7 @@ The update introduces breaking changes. If you want to keep using the older vers ## Package updates - In `score()`, required columns "true_value" and "prediction" were renamed and replaced by required columns "observed" and "predicted". Scoring functions now also use the function arguments "observed" and "predicted" everywhere consistently. +- The overall scoring workflow was updated. `score()` is now a generic function that dispatches the correct method based on the forecast type. Forecast types currently supported are "binary", "point", "sample" and "quantile" with corresponding classes "forecast_binary", "forecast_point", "forecast_sample" and "forecast_quantile". 
An object of class `forecast_*` can be created using the function `as_forecast()`, which also replaces the previous function `check_forecasts()` (see more information below). - Scoring functions received a consistent interface and input checks: - metrics for binary forecasts: - `observed`: factor with exactly 2 levels diff --git a/R/get_-functions.R b/R/get_-functions.R index e889911b4..f68434651 100644 --- a/R/get_-functions.R +++ b/R/get_-functions.R @@ -250,7 +250,7 @@ get_duplicate_forecasts <- function(data, forecast_unit = NULL) { #' @title Get a list of all attributes of a scoringutils object #' -#' @param object A object of class `scoringutils_` +#' @param object An object of class `forecast_` #' #' @return A named list with the attributes of that object. #' @keywords internal diff --git a/R/score.R b/R/score.R index 30d717b1a..6c1298d90 100644 --- a/R/score.R +++ b/R/score.R @@ -70,13 +70,13 @@ score <- function(data, ...) { score.default <- function(data, ...) { assert(check_data_columns(data)) forecast_type <- get_forecast_type(data) - data <- new_scoringutils(data, paste0("scoringutils_", forecast_type)) + data <- new_forecast(data, paste0("forecast_", forecast_type)) score(data, ...) } #' @rdname score #' @export -score.scoringutils_binary <- function(data, metrics = metrics_binary, ...) { +score.forecast_binary <- function(data, metrics = metrics_binary, ...) { data <- validate_forecast(data) data <- remove_na_observed_predicted(data) metrics <- validate_metrics(metrics) @@ -96,7 +96,7 @@ score.scoringutils_binary <- function(data, metrics = metrics_binary, ...) { #' @importFrom Metrics se ae ape #' @rdname score #' @export -score.scoringutils_point <- function(data, metrics = metrics_point, ...) { +score.forecast_point <- function(data, metrics = metrics_point, ...) 
{ data <- validate_forecast(data) data <- remove_na_observed_predicted(data) metrics <- validate_metrics(metrics) @@ -113,7 +113,7 @@ score.scoringutils_point <- function(data, metrics = metrics_point, ...) { #' @rdname score #' @export -score.scoringutils_sample <- function(data, metrics = metrics_sample, ...) { +score.forecast_sample <- function(data, metrics = metrics_sample, ...) { data <- validate_forecast(data) data <- remove_na_observed_predicted(data) forecast_unit <- attr(data, "forecast_unit") @@ -150,7 +150,7 @@ score.scoringutils_sample <- function(data, metrics = metrics_sample, ...) { #' @importFrom data.table `:=` as.data.table rbindlist %like% #' @rdname score #' @export -score.scoringutils_quantile <- function(data, metrics = metrics_quantile, ...) { +score.forecast_quantile <- function(data, metrics = metrics_quantile, ...) { data <- validate_forecast(data) data <- remove_na_observed_predicted(data) forecast_unit <- attr(data, "forecast_unit") @@ -176,7 +176,9 @@ score.scoringutils_quantile <- function(data, metrics = metrics_quantile, ...) { observed <- data$observed predicted <- do.call(rbind, data$predicted) quantile <- unlist(unique(data$quantile)) - data[, c("observed", "predicted", "quantile", "scoringutils_quantile") := NULL] + data[, c( + "observed", "predicted", "quantile", "scoringutils_quantile" + ) := NULL] data <- apply_metrics( data, metrics, diff --git a/R/validate.R b/R/validate.R index 65da31eba..4a1be5f1f 100644 --- a/R/validate.R +++ b/R/validate.R @@ -6,14 +6,14 @@ #' quantile-based) from the input data (using the function #' [get_forecast_type()]. It then constructs an object of the #' appropriate class (`forecast_binary`, `forecast_point`, `forecast_sample`, or -#' `forecast_quantile`, using the function [new_scoringutils()]). +#' `forecast_quantile`, using the function [new_forecast()]). #' Lastly, it calls [as_forecast()] on the object to make sure it conforms with #' the required input formats. 
#' @inheritParams score #' @inheritSection forecast_types Forecast types and input format #' @return Depending on the forecast type, an object of class -#' `scoringutils_binary`, `scoringutils_point`, `scoringutils_sample` or -#' `scoringutils_quantile`. +#' `forecast_binary`, `forecast_point`, `forecast_sample` or +#' `forecast_quantile`. #' @export #' @keywords check-forecasts #' @examples @@ -32,7 +32,7 @@ as_forecast.default <- function(data, ...) { forecast_type <- get_forecast_type(data) # construct class - data <- new_scoringutils(data, paste0("scoringutils_", forecast_type)) + data <- new_forecast(data, paste0("forecast_", forecast_type)) # validate class validate_forecast(data) @@ -48,8 +48,8 @@ as_forecast.default <- function(data, ...) { #' @inheritParams score #' @inheritSection forecast_types Forecast types and input format #' @return Depending on the forecast type, an object of class -#' `scoringutils_binary`, `scoringutils_point`, `scoringutils_sample` or -#' `scoringutils_quantile`. +#' `forecast_binary`, `forecast_point`, `forecast_sample` or +#' `forecast_quantile`. #' @importFrom data.table ':=' is.data.table #' @importFrom checkmate assert_data_frame #' @export @@ -62,10 +62,9 @@ validate_forecast <- function(data, ...) { } -#' @rdname validate #' @export #' @keywords check-forecasts -validate_forecast.scoringutils_binary <- function(data, ...) { +validate_forecast.forecast_binary <- function(data, ...) { data <- validate_general(data) columns_correct <- test_columns_not_present(data, c("sample_id", "quantile")) @@ -83,10 +82,10 @@ validate_forecast.scoringutils_binary <- function(data, ...) { return(data[]) } -#' @rdname validate + #' @export #' @keywords check-forecasts -validate_forecast.scoringutils_point <- function(data, ...) { +validate_forecast.forecast_point <- function(data, ...) 
{ data <- validate_general(data) input_check <- check_input_point(data$observed, data$predicted) @@ -98,22 +97,24 @@ validate_forecast.scoringutils_point <- function(data, ...) { return(data[]) } -#' @rdname validate + #' @export -validate_forecast.scoringutils_quantile <- function(data, ...) { +#' @keywords check-forecasts +validate_forecast.forecast_quantile <- function(data, ...) { data <- validate_general(data) assert_numeric(data$quantile, lower = 0, upper = 1) return(data[]) } -#' @rdname validate + #' @export #' @keywords check-forecasts -validate_forecast.scoringutils_sample <- function(data, ...) { +validate_forecast.forecast_sample <- function(data, ...) { data <- validate_general(data) return(data[]) } + #' @title Apply scoringutls input checks that are the same across forecast types #' #' @description @@ -181,7 +182,7 @@ validate_general <- function(data) { #' @return An object of the class indicated by `classname` #' @export #' @keywords internal -new_scoringutils <- function(data, classname) { +new_forecast <- function(data, classname) { data <- as.data.table(data) data <- assure_model_column(data) class(data) <- c(classname, class(data)) diff --git a/man/as_forecast.Rd b/man/as_forecast.Rd index 11e6091ce..3704364a8 100644 --- a/man/as_forecast.Rd +++ b/man/as_forecast.Rd @@ -16,8 +16,8 @@ as_forecast(data, ...) } \value{ Depending on the forecast type, an object of class -\code{scoringutils_binary}, \code{scoringutils_point}, \code{scoringutils_sample} or -\code{scoringutils_quantile}. +\code{forecast_binary}, \code{forecast_point}, \code{forecast_sample} or +\code{forecast_quantile}. } \description{ Convert a data.frame or similar of forecasts into an object of @@ -27,7 +27,7 @@ class \verb{forecast_*} and validate it. quantile-based) from the input data (using the function \code{\link[=get_forecast_type]{get_forecast_type()}}. 
It then constructs an object of the appropriate class (\code{forecast_binary}, \code{forecast_point}, \code{forecast_sample}, or -\code{forecast_quantile}, using the function \code{\link[=new_scoringutils]{new_scoringutils()}}). +\code{forecast_quantile}, using the function \code{\link[=new_forecast]{new_forecast()}}). Lastly, it calls \code{\link[=as_forecast]{as_forecast()}} on the object to make sure it conforms with the required input formats. } diff --git a/man/get_scoringutils_attributes.Rd b/man/get_scoringutils_attributes.Rd index 82ce6dcbe..5f2770a6a 100644 --- a/man/get_scoringutils_attributes.Rd +++ b/man/get_scoringutils_attributes.Rd @@ -7,7 +7,7 @@ get_scoringutils_attributes(object) } \arguments{ -\item{object}{A object of class \code{scoringutils_}} +\item{object}{An object of class \code{forecast_}} } \value{ A named list with the attributes of that object. } diff --git a/man/new_scoringutils.Rd b/man/new_forecast.Rd similarity index 88% rename from man/new_scoringutils.Rd rename to man/new_forecast.Rd index e19feb989..772c5e1b4 100644 --- a/man/new_scoringutils.Rd +++ b/man/new_forecast.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/validate.R -\name{new_scoringutils} -\alias{new_scoringutils} +\name{new_forecast} +\alias{new_forecast} \title{Class constructor for scoringutils objects} \usage{ -new_scoringutils(data, classname) +new_forecast(data, classname) } \arguments{ \item{data}{A data.frame or data.table with predicted and observed values.} diff --git a/man/score.Rd b/man/score.Rd index 92b484bed..d317ed64a 100644 --- a/man/score.Rd +++ b/man/score.Rd @@ -3,23 +3,23 @@ \name{score} \alias{score} \alias{score.default} -\alias{score.scoringutils_binary} -\alias{score.scoringutils_point} -\alias{score.scoringutils_sample} -\alias{score.scoringutils_quantile} +\alias{score.forecast_binary} +\alias{score.forecast_point} +\alias{score.forecast_sample} +\alias{score.forecast_quantile} \title{Evaluate 
forecasts in a data.frame format} \usage{ score(data, ...) \method{score}{default}(data, ...) -\method{score}{scoringutils_binary}(data, metrics = metrics_binary, ...) +\method{score}{forecast_binary}(data, metrics = metrics_binary, ...) -\method{score}{scoringutils_point}(data, metrics = metrics_point, ...) +\method{score}{forecast_point}(data, metrics = metrics_point, ...) -\method{score}{scoringutils_sample}(data, metrics = metrics_sample, ...) +\method{score}{forecast_sample}(data, metrics = metrics_sample, ...) -\method{score}{scoringutils_quantile}(data, metrics = metrics_quantile, ...) +\method{score}{forecast_quantile}(data, metrics = metrics_quantile, ...) } \arguments{ \item{data}{A data.frame or data.table with predicted and observed values.} diff --git a/man/validate_forecast.Rd b/man/validate_forecast.Rd index 5d4bbe2d7..542e84dd5 100644 --- a/man/validate_forecast.Rd +++ b/man/validate_forecast.Rd @@ -13,8 +13,8 @@ validate_forecast(data, ...) } \value{ Depending on the forecast type, an object of class -\code{scoringutils_binary}, \code{scoringutils_point}, \code{scoringutils_sample} or -\code{scoringutils_quantile}. +\code{forecast_binary}, \code{forecast_point}, \code{forecast_sample} or +\code{forecast_quantile}. 
} \description{ Methods for the different classes run \code{\link[=validate_general]{validate_general()}}, which performs diff --git a/tests/testthat/test-check_forecasts.R b/tests/testthat/test-check_forecasts.R index 1441c3f7f..004031d71 100644 --- a/tests/testthat/test-check_forecasts.R +++ b/tests/testthat/test-check_forecasts.R @@ -1,6 +1,6 @@ test_that("as_forecast() function works", { check <- suppressMessages(as_forecast(example_quantile)) - expect_s3_class(check, "scoringutils_quantile") + expect_s3_class(check, "forecast_quantile") }) test_that("as_forecast() function has an error for empty data.frame", { diff --git a/tests/testthat/test-metrics-point.R b/tests/testthat/test-metrics-point.R index 2f64226df..54aeb623c 100644 --- a/tests/testthat/test-metrics-point.R +++ b/tests/testthat/test-metrics-point.R @@ -179,9 +179,9 @@ test_that("abs error is correct, point and median forecasts same", { observations = truth_scoringutils ) - data_scoringutils_point <- data_scoringutils[type == "point"][, quantile := NULL] + data_forecast_point <- data_scoringutils[type == "point"][, quantile := NULL] - eval <- score(data = data_scoringutils_point) + eval <- score(data = data_forecast_point) eval <- summarise_scores(eval, by = c( "location", "target_end_date", diff --git a/tests/testthat/test-score.R b/tests/testthat/test-score.R index a67575984..29132de21 100644 --- a/tests/testthat/test-score.R +++ b/tests/testthat/test-score.R @@ -38,7 +38,7 @@ test_that("function produces output for a binary case", { expect_true("brier_score" %in% names(eval)) }) -test_that("score.scoringutils_binary() errors with only NA values", { +test_that("score.forecast_binary() errors with only NA values", { only_nas <- copy(example_binary)[, predicted := NA_real_] expect_error( score(only_nas), @@ -156,7 +156,7 @@ test_that("Changing metrics names works", { }) -test_that("score.scoringutils_point() errors with only NA values", { +test_that("score.forecast_point() errors with only NA 
values", { only_nas <- copy(example_point)[, predicted := NA_real_] expect_error( score(only_nas), @@ -239,7 +239,7 @@ test_that("WIS is the same with other metrics omitted or included", { }) -test_that("score.scoringutils_quantile() errors with only NA values", { +test_that("score.forecast_quantile() errors with only NA values", { only_nas <- copy(example_quantile)[, predicted := NA_real_] expect_error( score(only_nas),