diff --git a/R/class-forecast-quantile.R b/R/class-forecast-quantile.R
index 6d6c1bd3d..f5e61f58d 100644
--- a/R/class-forecast-quantile.R
+++ b/R/class-forecast-quantile.R
@@ -205,7 +205,7 @@ get_metrics.forecast_quantile <- function(x, select = NULL, exclude = NULL, ...)
     dispersion = dispersion_quantile,
     bias = bias_quantile,
     interval_coverage_50 = interval_coverage,
-    interval_coverage_90 = purrr::partial(
+    interval_coverage_90 = partial(
       interval_coverage, interval_range = 90
     ),
     ae_median = ae_median_quantile
@@ -239,7 +239,7 @@ get_pit_histogram.forecast_quantile <- function(forecast, num_bins = NULL,
   diffs <- round(diff(quantiles), 10)

   if (length(setdiff(quantiles, present_quantiles)) > 0) {
-    cli::cli_warn(
+    cli_warn(
       "Some requested quantiles are missing in the forecast. ",
       "The PIT histogram will be based on the quantiles present in the forecast."
     )
diff --git a/R/class-forecast-sample.R b/R/class-forecast-sample.R
index 5bf50d865..c554aebfb 100644
--- a/R/class-forecast-sample.R
+++ b/R/class-forecast-sample.R
@@ -214,7 +214,7 @@ get_pit_histogram.forecast_sample <- function(forecast, num_bins = 10,
     quantiles <- unique(c(0, breaks, 1))
   }

-  forecast_wide <- data.table::dcast(
+  forecast_wide <- dcast(
     forecast,
     ... ~ paste0("InternalSampl_", sample_id),
     value.var = "predicted"
diff --git a/R/class-forecast.R b/R/class-forecast.R
index 2d9e46d70..45fabd405 100644
--- a/R/class-forecast.R
+++ b/R/class-forecast.R
@@ -284,7 +284,7 @@ is_forecast <- function(x) {
   # print.data.table will internally subset the data.table before printing.
   # this subsetting triggers the validation, which is not desired in this case.
   # this is a hack and ideally, we'd do things differently.
-  if (nrow(out) > 30 && data.table::is.data.table(out) && !is_dt_force_print) {
+  if (nrow(out) > 30 && is.data.table(out) && !is_dt_force_print) {
     # check whether subset object passes validation
     validation <- try(
       assert_forecast(forecast = out, verbose = FALSE),
diff --git a/R/get-correlations.R b/R/get-correlations.R
index 77892bd63..d5ef5af8f 100644
--- a/R/get-correlations.R
+++ b/R/get-correlations.R
@@ -100,8 +100,8 @@ plot_correlations <- function(correlations, digits = NULL) {
   rownames(lower_triangle) <- colnames(lower_triangle)

   # get plot data.frame
-  plot_df <- data.table::as.data.table(lower_triangle)[, metric := metrics]
-  plot_df <- na.omit(data.table::melt(plot_df, id.vars = "metric"))
+  plot_df <- as.data.table(lower_triangle)[, metric := metrics]
+  plot_df <- na.omit(melt(plot_df, id.vars = "metric"))

   # refactor levels according to the metrics
   plot_df[, metric := factor(metric, levels = metrics)]
diff --git a/R/helper-quantile-interval-range.R b/R/helper-quantile-interval-range.R
index cba7b5ff7..ff1cba269 100644
--- a/R/helper-quantile-interval-range.R
+++ b/R/helper-quantile-interval-range.R
@@ -74,7 +74,7 @@ quantile_to_interval_dataframe <- function(forecast,
   median <- forecast[quantile_level == 0.5, ]
   median[, boundary := "upper"]

-  forecast <- data.table::rbindlist(list(forecast, median))
+  forecast <- rbindlist(list(forecast, median))
   if (!keep_quantile_col) {
     forecast[, quantile_level := NULL]
   }
diff --git a/R/metrics-ordinal.R b/R/metrics-ordinal.R
index d3bc50c4e..03e87e191 100644
--- a/R/metrics-ordinal.R
+++ b/R/metrics-ordinal.R
@@ -75,6 +75,6 @@ rps_ordinal <- function(observed, predicted, predicted_label) {
   correct_order <- as.numeric(predicted_label)
   ordered_predicted <- predicted[, correct_order]

-  rps <- scoringRules::rps_probs(as.numeric(observed), ordered_predicted)
+  rps <- rps_probs(as.numeric(observed), ordered_predicted)
   return(rps)
 }
diff --git a/R/metrics-sample.R b/R/metrics-sample.R
index 1421afca7..b5a220bfc 100644
--- a/R/metrics-sample.R
+++ b/R/metrics-sample.R
@@ -547,7 +547,7 @@ pit_histogram_sample <- function(observed,
   }

   if (integers != "random" && !is.null(n_replicates)) {
-    cli::cli_warn("`n_replicates` is ignored when `integers` is not `random`")
+    cli_warn("`n_replicates` is ignored when `integers` is not `random`")
   }

   # calculate PIT-values -------------------------------------------------------
diff --git a/R/pairwise-comparisons.R b/R/pairwise-comparisons.R
index 2e8b00b10..3e416d39e 100644
--- a/R/pairwise-comparisons.R
+++ b/R/pairwise-comparisons.R
@@ -249,7 +249,7 @@ get_pairwise_comparisons <- function(
     }
   )

-  out <- data.table::rbindlist(results)
+  out <- rbindlist(results)

   return(out[])
 }
@@ -298,7 +298,7 @@ pairwise_comparison_one_group <- function(scores,
   # be the same

   # set up initial data.frame with all possible pairwise comparisons
-  combinations <- data.table::as.data.table(t(combn(comparators, m = 2)))
+  combinations <- as.data.table(t(combn(comparators, m = 2)))
   colnames(combinations) <- c("..compare", "compare_against")

   combinations[, c("ratio", "pval") := compare_forecasts(
@@ -316,7 +316,7 @@
   combinations[, adj_pval := p.adjust(pval)]

   # mirror computations
-  combinations_mirrored <- data.table::copy(combinations)
+  combinations_mirrored <- copy(combinations)
   setnames(combinations_mirrored,
     old = c("..compare", "compare_against"),
     new = c("compare_against", "..compare")
@@ -324,14 +324,14 @@
   combinations_mirrored[, ratio := 1 / ratio]

   # add a one for those that are the same
-  combinations_equal <- data.table::data.table(
+  combinations_equal <- data.table(
     ..compare = comparators,
     compare_against = comparators,
     ratio = 1,
     pval = 1,
     adj_pval = 1
   )
-  result <- data.table::rbindlist(list(
+  result <- rbindlist(list(
     combinations,
     combinations_mirrored,
     combinations_equal
@@ -374,7 +374,7 @@ pairwise_comparison_one_group <- function(scores,
   out <- merge(scores, result, by = compare, all = TRUE)

   # rename ratio to mean_scores_ratio
-  data.table::setnames(out,
+  setnames(out,
     old = c("ratio", "theta"),
     new = c(
       "mean_scores_ratio",
@@ -382,7 +382,7 @@ pairwise_comparison_one_group <- function(scores,
     )
   )
   if (!is.null(baseline)) {
-    data.table::setnames(out,
+    setnames(out,
       old = "rel_to_baseline",
       new = paste(metric, "scaled_relative_skill", sep = "_")
     )
@@ -429,7 +429,7 @@ compare_forecasts <- function(scores,
                               one_sided = FALSE,
                               test_type = c("non_parametric", "permutation", NULL),
                               n_permutations = 999) {
-  scores <- data.table::as.data.table(scores)
+  scores <- as.data.table(scores)

   forecast_unit <- get_forecast_unit(scores)

diff --git a/tests/testthat/test-namespace-imports.R b/tests/testthat/test-namespace-imports.R
new file mode 100644
index 000000000..1a527a3b2
--- /dev/null
+++ b/tests/testthat/test-namespace-imports.R
@@ -0,0 +1,99 @@
+# Tests to verify NAMESPACE imports are correct and that removing redundant
+# `pkg::fn()` qualifiers doesn't cause regressions. See issue #445.
+
+test_that("all functions used without :: prefix are properly imported in NAMESPACE", {
+  ns <- asNamespace("scoringutils")
+
+  # data.table functions
+  expect_true(exists("as.data.table", envir = ns, inherits = TRUE))
+  expect_true(exists("melt", envir = ns, inherits = TRUE))
+  expect_true(exists("dcast", envir = ns, inherits = TRUE))
+  expect_true(exists("rbindlist", envir = ns, inherits = TRUE))
+  expect_true(exists("copy", envir = ns, inherits = TRUE))
+  expect_true(exists("data.table", envir = ns, inherits = TRUE))
+  expect_true(exists("setnames", envir = ns, inherits = TRUE))
+  expect_true(exists("is.data.table", envir = ns, inherits = TRUE))
+
+  # scoringRules functions
+  expect_true(exists("logs_sample", envir = ns, inherits = TRUE))
+  expect_true(exists("dss_sample", envir = ns, inherits = TRUE))
+  expect_true(exists("crps_sample", envir = ns, inherits = TRUE))
+  expect_true(exists("rps_probs", envir = ns, inherits = TRUE))
+
+  # cli, purrr, utils
+  expect_true(exists("cli_warn", envir = ns, inherits = TRUE))
+  expect_true(exists("partial", envir = ns, inherits = TRUE))
+  expect_true(exists("tail", envir = ns, inherits = TRUE))
+})
+
+test_that("scoringRules wrapper functions resolve to correct implementations", {
+  set.seed(42)
+  observed <- rpois(10, lambda = 5)
+  predicted <- replicate(50, rpois(10, lambda = 5))
+
+  result_logs <- logs_sample(observed, predicted)
+  expect_type(result_logs, "double")
+  expect_length(result_logs, 10)
+
+  result_dss <- dss_sample(observed, predicted)
+  expect_type(result_dss, "double")
+  expect_length(result_dss, 10)
+
+  result_crps <- crps_sample(observed, predicted)
+  expect_type(result_crps, "double")
+  expect_length(result_crps, 10)
+})
+
+test_that("get_correlations() produces correct output after namespace cleanup", {
+  result <- suppressMessages(
+    get_correlations(summarise_scores(
+      scores_quantile,
+      by = get_forecast_unit(scores_quantile)
+    ))
+  )
+  expect_s3_class(result, c("scores", "data.table", "data.frame"), exact = TRUE)
+  expect_true(nrow(result) > 0)
+})
+
+test_that("pairwise comparisons produce correct output after namespace cleanup", {
+  result <- suppressMessages(
+    get_pairwise_comparisons(scores_quantile,
+      baseline = "EuroCOVIDhub-baseline"
+    )
+  )
+  expect_s3_class(result, "data.table")
+  expect_true(nrow(result) > 0)
+  expected_cols <- c("model", "compare_against", "mean_scores_ratio",
+                     "pval", "adj_pval")
+  for (col in expected_cols) {
+    expect_true(col %in% names(result),
+                info = paste("Missing column:", col))
+  }
+})
+
+test_that("PIT histogram for sample forecasts works after namespace cleanup", {
+  result <- suppressMessages(
+    get_pit_histogram(as_forecast_sample(example_sample_continuous))
+  )
+  expect_s3_class(result, "data.table")
+  expect_true("density" %in% names(result))
+  expect_true(all(result$density >= 0))
+})
+
+test_that("quantile_to_interval conversion works after namespace cleanup", {
+  result <- suppressMessages(
+    score(as_forecast_quantile(na.omit(example_quantile)))
+  )
+  expect_s3_class(result, "scores")
+  expect_true("wis" %in% names(result))
+  expect_true("interval_coverage_50" %in% names(result))
+  expect_true("interval_coverage_90" %in% names(result))
+})
+
+test_that("get_metrics for quantile forecasts resolves partial() correctly", {
+  metrics <- suppressMessages(
+    get_metrics(as_forecast_quantile(na.omit(example_quantile)))
+  )
+  expect_true("interval_coverage_90" %in% names(metrics))
+  expect_true(is.function(metrics$interval_coverage_90))
+})