Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions R/class-forecast-quantile.R
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ get_metrics.forecast_quantile <- function(x, select = NULL, exclude = NULL, ...)
dispersion = dispersion_quantile,
bias = bias_quantile,
interval_coverage_50 = interval_coverage,
interval_coverage_90 = purrr::partial(
interval_coverage_90 = partial(
interval_coverage, interval_range = 90
),
ae_median = ae_median_quantile
Expand Down Expand Up @@ -239,7 +239,7 @@ get_pit_histogram.forecast_quantile <- function(forecast, num_bins = NULL,
diffs <- round(diff(quantiles), 10)

if (length(setdiff(quantiles, present_quantiles)) > 0) {
cli::cli_warn(
cli_warn(
"Some requested quantiles are missing in the forecast. ",
"The PIT histogram will be based on the quantiles present in the forecast."
)
Expand Down
2 changes: 1 addition & 1 deletion R/class-forecast-sample.R
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ get_pit_histogram.forecast_sample <- function(forecast, num_bins = 10,
quantiles <- unique(c(0, breaks, 1))
}

forecast_wide <- data.table::dcast(
forecast_wide <- dcast(
forecast,
... ~ paste0("InternalSampl_", sample_id),
value.var = "predicted"
Expand Down
2 changes: 1 addition & 1 deletion R/class-forecast.R
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ is_forecast <- function(x) {
# print.data.table will internally subset the data.table before printing.
# this subsetting triggers the validation, which is not desired in this case.
# this is a hack and ideally, we'd do things differently.
if (nrow(out) > 30 && data.table::is.data.table(out) && !is_dt_force_print) {
if (nrow(out) > 30 && is.data.table(out) && !is_dt_force_print) {
# check whether subset object passes validation
validation <- try(
assert_forecast(forecast = out, verbose = FALSE),
Expand Down
4 changes: 2 additions & 2 deletions R/get-correlations.R
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,8 @@ plot_correlations <- function(correlations, digits = NULL) {
rownames(lower_triangle) <- colnames(lower_triangle)

# get plot data.frame
plot_df <- data.table::as.data.table(lower_triangle)[, metric := metrics]
plot_df <- na.omit(data.table::melt(plot_df, id.vars = "metric"))
plot_df <- as.data.table(lower_triangle)[, metric := metrics]
plot_df <- na.omit(melt(plot_df, id.vars = "metric"))

# refactor levels according to the metrics
plot_df[, metric := factor(metric, levels = metrics)]
Expand Down
2 changes: 1 addition & 1 deletion R/helper-quantile-interval-range.R
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ quantile_to_interval_dataframe <- function(forecast,
median <- forecast[quantile_level == 0.5, ]
median[, boundary := "upper"]

forecast <- data.table::rbindlist(list(forecast, median))
forecast <- rbindlist(list(forecast, median))
if (!keep_quantile_col) {
forecast[, quantile_level := NULL]
}
Expand Down
2 changes: 1 addition & 1 deletion R/metrics-ordinal.R
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,6 @@ rps_ordinal <- function(observed, predicted, predicted_label) {
correct_order <- as.numeric(predicted_label)
ordered_predicted <- predicted[, correct_order]

rps <- scoringRules::rps_probs(as.numeric(observed), ordered_predicted)
rps <- rps_probs(as.numeric(observed), ordered_predicted)
return(rps)
}
2 changes: 1 addition & 1 deletion R/metrics-sample.R
Original file line number Diff line number Diff line change
Expand Up @@ -547,7 +547,7 @@ pit_histogram_sample <- function(observed,
}

if (integers != "random" && !is.null(n_replicates)) {
cli::cli_warn("`n_replicates` is ignored when `integers` is not `random`")
cli_warn("`n_replicates` is ignored when `integers` is not `random`")
}

# calculate PIT-values -------------------------------------------------------
Expand Down
16 changes: 8 additions & 8 deletions R/pairwise-comparisons.R
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ get_pairwise_comparisons <- function(
}
)

out <- data.table::rbindlist(results)
out <- rbindlist(results)

return(out[])
}
Expand Down Expand Up @@ -298,7 +298,7 @@ pairwise_comparison_one_group <- function(scores,
# be the same

# set up initial data.frame with all possible pairwise comparisons
combinations <- data.table::as.data.table(t(combn(comparators, m = 2)))
combinations <- as.data.table(t(combn(comparators, m = 2)))
colnames(combinations) <- c("..compare", "compare_against")

combinations[, c("ratio", "pval") := compare_forecasts(
Expand All @@ -316,22 +316,22 @@ pairwise_comparison_one_group <- function(scores,
combinations[, adj_pval := p.adjust(pval)]

# mirror computations
combinations_mirrored <- data.table::copy(combinations)
combinations_mirrored <- copy(combinations)
setnames(combinations_mirrored,
old = c("..compare", "compare_against"),
new = c("compare_against", "..compare")
)
combinations_mirrored[, ratio := 1 / ratio]

# add a one for those that are the same
combinations_equal <- data.table::data.table(
combinations_equal <- data.table(
..compare = comparators,
compare_against = comparators,
ratio = 1,
pval = 1,
adj_pval = 1
)
result <- data.table::rbindlist(list(
result <- rbindlist(list(
combinations,
combinations_mirrored,
combinations_equal
Expand Down Expand Up @@ -374,15 +374,15 @@ pairwise_comparison_one_group <- function(scores,
out <- merge(scores, result, by = compare, all = TRUE)

# rename ratio to mean_scores_ratio
data.table::setnames(out,
setnames(out,
old = c("ratio", "theta"),
new = c(
"mean_scores_ratio",
paste(metric, "relative_skill", sep = "_")
)
)
if (!is.null(baseline)) {
data.table::setnames(out,
setnames(out,
old = "rel_to_baseline",
new = paste(metric, "scaled_relative_skill", sep = "_")
)
Expand Down Expand Up @@ -429,7 +429,7 @@ compare_forecasts <- function(scores,
one_sided = FALSE,
test_type = c("non_parametric", "permutation", NULL),
n_permutations = 999) {
scores <- data.table::as.data.table(scores)
scores <- as.data.table(scores)

forecast_unit <- get_forecast_unit(scores)

Expand Down
99 changes: 99 additions & 0 deletions tests/testthat/test-namespace-imports.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
# Tests to verify NAMESPACE imports are correct and that removing redundant
# `pkg::fn()` qualifiers doesn't cause regressions. See issue #445.

test_that("all functions used without :: prefix are properly imported in NAMESPACE", {
  ns <- asNamespace("scoringutils")

  # Helper: assert that each name resolves from inside the package
  # namespace (directly, via the imports environment, or via base).
  expect_resolvable <- function(fns) {
    for (fn in fns) {
      expect_true(exists(fn, envir = ns, inherits = TRUE))
    }
  }

  # data.table functions
  expect_resolvable(c(
    "as.data.table", "melt", "dcast", "rbindlist",
    "copy", "data.table", "setnames", "is.data.table"
  ))

  # scoringRules functions
  expect_resolvable(c("logs_sample", "dss_sample", "crps_sample", "rps_probs"))

  # cli, purrr, utils
  expect_resolvable(c("cli_warn", "partial", "tail"))
})

test_that("scoringRules wrapper functions resolve to correct implementations", {
  set.seed(42)
  obs <- rpois(10, lambda = 5)
  pred <- replicate(50, rpois(10, lambda = 5))

  # Each sample-based score should return one numeric (double) value per
  # observation. Order matches the original checks: logs, dss, crps.
  for (score_fn in list(logs_sample, dss_sample, crps_sample)) {
    scores <- score_fn(obs, pred)
    expect_type(scores, "double")
    expect_length(scores, 10)
  }
})

test_that("get_correlations() produces correct output after namespace cleanup", {
  # get_correlations() internally uses as.data.table() and melt(); this
  # checks the result is still a proper scores/data.table after dropping
  # the data.table:: qualifiers (issue #445).
  result <- suppressMessages(
    get_correlations(summarise_scores(
      scores_quantile,
      by = get_forecast_unit(scores_quantile)
    ))
  )
  expect_s3_class(result, c("scores", "data.table", "data.frame"), exact = TRUE)
  # expect_gt() gives a more informative failure message than
  # expect_true(nrow(result) > 0) and satisfies expect_comparison_linter.
  expect_gt(nrow(result), 0)
})

test_that("pairwise comparisons produce correct output after namespace cleanup", {
  result <- suppressMessages(
    get_pairwise_comparisons(scores_quantile,
      baseline = "EuroCOVIDhub-baseline"
    )
  )
  expect_s3_class(result, "data.table")
  # expect_gt() gives a more informative failure message than
  # expect_true(nrow(result) > 0) and satisfies expect_comparison_linter.
  expect_gt(nrow(result), 0)

  # Columns produced by pairwise_comparison_one_group() and renamed via
  # setnames() must all survive the namespace cleanup.
  expected_cols <- c(
    "model", "compare_against", "mean_scores_ratio", "pval", "adj_pval"
  )
  for (col in expected_cols) {
    expect_true(col %in% names(result),
      info = paste("Missing column:", col)
    )
  }
})

test_that("PIT histogram for sample forecasts works after namespace cleanup", {
  # Exercises the dcast() call inside get_pit_histogram.forecast_sample().
  pit <- suppressMessages(
    get_pit_histogram(as_forecast_sample(example_sample_continuous))
  )
  expect_s3_class(pit, "data.table")
  expect_true("density" %in% names(pit))
  # PIT densities can never be negative.
  expect_true(all(pit$density >= 0))
})

test_that("quantile_to_interval conversion works after namespace cleanup", {
  # score() on quantile forecasts exercises rbindlist() inside
  # quantile_to_interval_dataframe() via the interval-based metrics.
  scored <- suppressMessages(
    score(as_forecast_quantile(na.omit(example_quantile)))
  )
  expect_s3_class(scored, "scores")
  for (metric in c("wis", "interval_coverage_50", "interval_coverage_90")) {
    expect_true(metric %in% names(scored))
  }
})

test_that("get_metrics for quantile forecasts resolves partial() correctly", {
  # interval_coverage_90 is built with purrr's partial(); it must still be
  # present and callable after dropping the purrr:: qualifier.
  metrics <- suppressMessages(
    get_metrics(as_forecast_quantile(na.omit(example_quantile)))
  )
  expect_true("interval_coverage_90" %in% names(metrics))
  # expect_type() is preferred over expect_true(is.function(...)) per
  # expect_type_linter; functions created by partial() are closures.
  expect_type(metrics$interval_coverage_90, "closure")
})
Loading