evalclass / precrec
Showing 24 of 215 files from the diff.
Other files ignored by Codecov
man/auc.Rd has changed.
NAMESPACE has changed.
man/fortify.Rd has changed.
man/precrec.Rd has changed.
man/mmdata.Rd has changed.
man/auc_ci.Rd has changed.
man/part.Rd has changed.
man/plot.Rd has changed.
README.md has changed.
NEWS.md has changed.
man/evalmod.Rd has changed.
man/autoplot.Rd has changed.
man/pauc.Rd has changed.
appveyor.yml has changed.
README.Rmd has changed.
DESCRIPTION has changed.
cran-comments.md has changed.

@@ -80,7 +80,6 @@
 #'
 #' @export
 format_nfold <- function(nfold_df, score_cols, lab_col, fold_col) {
-
   # Validate arguments
   .validate_format_nfold_args(nfold_df, score_cols, lab_col, fold_col)

@@ -422,11 +422,12 @@
 #
 .load_ggplot2 <- function() {
   if (!requireNamespace("ggplot2", quietly = TRUE)) {
-    stop(paste(
-      "ggplot2 is required to perform this function.",
-      "Please install it."
-    ),
-    call. = FALSE
+    stop(
+      paste(
+        "ggplot2 is required to perform this function.",
+        "Please install it."
+      ),
+      call. = FALSE
     )
   }
 }
@@ -460,11 +461,12 @@
   if (requireNamespace("patchwork", quietly = TRUE)) {
     return(TRUE)
   } else {
-    warning(paste0(
-      "patchwork is not installed. ",
-      "grid and gridExtra will be used instead."
-    ),
-    call. = FALSE
+    warning(
+      paste0(
+        "patchwork is not installed. ",
+        "grid and gridExtra will be used instead."
+      ),
+      call. = FALSE
     )
     return(FALSE)
   }
@@ -642,47 +644,55 @@
   )

   # === Create a ggplot object ===
+  x_col <- rlang::sym("x")
+  y_col <- rlang::sym("y")
+  ymin_col <- rlang::sym("ymin")
+  ymax_col <- rlang::sym("ymax")
+  modname_col <- rlang::sym("modname")
+  dsid_modname_col <- rlang::sym("dsid_modname")
   if (show_cb) {
     p <- ggplot2::ggplot(
       curve_df,
-      ggplot2::aes_string(
-        x = "x", y = "y",
-        ymin = "ymin", ymax = "ymax"
+      ggplot2::aes(
+        x = !!x_col, y = !!y_col,
+        ymin = !!ymin_col, ymax = !!ymax_col
       )
     )
     if (type == "l") {
-      p <- p + ggplot2::geom_smooth(ggplot2::aes_string(color = "modname"),
+      p <- p + ggplot2::geom_smooth(ggplot2::aes(color = !!modname_col),
         stat = "identity", na.rm = TRUE,
-        size = 0.5
+        linewidth = 0.5
       )
     } else if (type == "b" || type == "p") {
-      p <- p + ggplot2::geom_ribbon(ggplot2::aes_string(
-        ymin = "ymin",
-        ymax = "ymax",
-        group = "modname"
-      ),
-      stat = "identity", alpha = 0.25,
-      fill = "grey25", na.rm = TRUE
+      p <- p + ggplot2::geom_ribbon(
+        ggplot2::aes(
+          ymin = !!ymin_col,
+          ymax = !!ymax_col,
+          group = !!modname_col
+        ),
+        stat = "identity", alpha = 0.25,
+        fill = "grey25", na.rm = TRUE
       )
       if (type == "b") {
-        p <- p + ggplot2::geom_line(ggplot2::aes_string(color = "modname"),
+        p <- p + ggplot2::geom_line(ggplot2::aes(color = !!modname_col),
           alpha = 0.25, na.rm = TRUE
        )
       }
-      p <- p + ggplot2::geom_point(ggplot2::aes_string(
-        x = "x", y = "y",
-        color = "modname"
-      ),
-      na.rm = TRUE
+      p <- p + ggplot2::geom_point(
+        ggplot2::aes(
+          x = !!x_col, y = !!y_col,
+          color = !!modname_col
+        ),
+        na.rm = TRUE
      )
     }
   } else if (raw_curves) {
     p <- ggplot2::ggplot(
       curve_df,
-      ggplot2::aes_string(
-        x = "x", y = "y",
-        group = "dsid_modname",
-        color = "modname"
+      ggplot2::aes(
+        x = !!x_col, y = !!y_col,
+        group = !!dsid_modname_col,
+        color = !!modname_col
       )
     )

@@ -695,9 +705,9 @@
       p <- p + ggplot2::geom_point(na.rm = TRUE)
     }
   } else {
-    p <- ggplot2::ggplot(curve_df, ggplot2::aes_string(
-      x = "x", y = "y",
-      color = "modname"
+    p <- ggplot2::ggplot(curve_df, ggplot2::aes(
+      x = !!x_col, y = !!y_col,
+      color = !!modname_col
     ))
     if (type == "l") {
       p <- p + ggplot2::geom_line(na.rm = TRUE)
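
Note: the hunks above replace the deprecated ggplot2::aes_string() calls with ggplot2::aes() plus rlang tidy evaluation (sym() and !!), and rename the size argument of line geoms to linewidth, the name used since ggplot2 3.4.0. A minimal standalone sketch of the same migration pattern, using made-up toy data rather than precrec objects:

# Not part of the diff: toy illustration of the aes_string() -> aes() migration.
library(ggplot2)

toy_df <- data.frame(x = 1:10, y = runif(10), modname = rep(c("m1", "m2"), 5))

# Column names stay as strings, converted to symbols and unquoted with !!
x_col <- rlang::sym("x")
y_col <- rlang::sym("y")
modname_col <- rlang::sym("modname")

# Old: ggplot(toy_df, aes_string(x = "x", y = "y", color = "modname"))
p <- ggplot(toy_df, aes(x = !!x_col, y = !!y_col, color = !!modname_col)) +
  geom_line(linewidth = 0.5) # "size" for lines became "linewidth" in ggplot2 3.4.0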

@@ -143,7 +143,6 @@
 # Summarize basic evaluation measures
 #
 .summarize_basic <- function(lpoints, mdat) {
-
   # Summarize AUC of ROC or PRC curves
   modnames <- attr(mdat, "data_info")[["modnames"]]
   dsids <- attr(mdat, "data_info")[["dsids"]]

@@ -3,7 +3,6 @@
 #
 create_curves <- function(pevals, scores = NULL, labels = NULL,
                           x_bins = 1000, keep_pevals = FALSE, ...) {
-
   # === Validate input arguments ===
   # Create pevals from scores and labels if pevals is missing
   pevals <- .create_src_obj(
@@ -54,7 +53,6 @@
 #
 create_roc <- function(pevals, scores = NULL, labels = NULL, x_bins = 1000,
                        keep_pevals = FALSE, ...) {
-
   # === Create a ROC curve ===
   .create_curve(
     "specificity", "sensitivity", create_roc_curve,
@@ -68,7 +66,6 @@
 #
 create_prc <- function(pevals, scores = NULL, labels = NULL, x_bins = 1000,
                        keep_pevals = FALSE, ...) {
-
   # === Create a Precision-Recall curve ===
   .create_curve(
     "sensitivity", "precision", create_prc_curve,
@@ -83,7 +80,6 @@
 .create_curve <- function(x_name, y_name, func, func_name, class_name,
                           pevals, scores = NULL, labels = NULL, x_bins = 1000,
                           keep_pevals = FALSE, ...) {
-
   # === Validate input arguments ===
   # Create pevals from scores and labels if pevals is missing
   pevals <- .create_src_obj(

@@ -5,9 +5,11 @@
   curve_df <- .prepare_autoplot(object)

   # === Create a ggplot object ===
+  x_col <- rlang::sym("x")
+  y_col <- rlang::sym("y")
   p <- ggplot2::ggplot(
     curve_df,
-    ggplot2::aes_string(x = "x", y = "y", color = "x")
+    ggplot2::aes(x = !!x_col, y = !!y_col, color = !!x_col)
   )
   p <- p + ggplot2::geom_jitter()
   p <- p + ggplot2::coord_flip()
@@ -23,9 +25,12 @@
   curve_df <- .prepare_autoplot(object)

   # === Create a ggplot object ===
+  x_col <- rlang::sym("x")
+  y_col <- rlang::sym("y")
+  group_col <- rlang::sym("group")
   p <- ggplot2::ggplot(
     curve_df,
-    ggplot2::aes_string(x = "x", y = "y", color = "group")
+    ggplot2::aes(x = !!x_col, y = !!y_col, color = !!group_col)
   )
   p <- p + ggplot2::geom_line()
   p <- .geom_basic(p, "TPs, FNs, FPs, and TNs by ranks",
@@ -41,9 +46,11 @@
   curve_df <- .prepare_autoplot(object)

   # === Create a ggplot object ===
+  x_col <- rlang::sym("x")
+  y_col <- rlang::sym("y")
   p <- ggplot2::ggplot(
     curve_df,
-    ggplot2::aes_string(x = "x", y = "y")
+    ggplot2::aes(x = !!x_col, y = !!y_col)
   )
   p <- p + ggplot2::geom_line()
   p <- p + ggplot2::facet_wrap(~group, ncol = 2)

@@ -312,7 +312,6 @@
 # Get pAUCs
 #
 .gather_paucs <- function(curves) {
-
   # Collect AUCs of ROC or PRC curves
   ct_len <- 2
   aucs <- attr(curves, "aucs")

@@ -5,7 +5,6 @@
                             calc_avg = FALSE, cb_alpha = 0.05,
                             raw_curves = FALSE, na_worst = TRUE,
                             ties_method = "equiv") {
-
   # === Calculate AUC ROC ===
   plfunc <- function(s) {
     # AUC with the U statistic

@@ -8,12 +8,18 @@
 #'
 #'   \tabular{ll}{
 #'     \strong{Function} \tab \strong{Description} \cr
-#'     \code{\link{evalmod}}            \tab Main function to calculate evaluation measures \cr
-#'     \code{\link{mmdata}}             \tab Reformat input data for performance evaluation calculation \cr
-#'     \code{\link{join_scores}}        \tab Join scores of multiple models into a list \cr
-#'     \code{\link{join_labels}}        \tab Join observed labels of multiple test datasets into a list \cr
-#'     \code{\link{create_sim_samples}} \tab Create random samples for simulations \cr
-#'     \code{\link{format_nfold}}       \tab Create n-fold cross validation dataset from data frame
+#'     \code{\link{evalmod}}
+#'           \tab Main function to calculate evaluation measures \cr
+#'     \code{\link{mmdata}}
+#'           \tab Reformat input data for performance evaluation calculation \cr
+#'     \code{\link{join_scores}}
+#'           \tab Join scores of multiple models into a list \cr
+#'     \code{\link{join_labels}}
+#'           \tab Join observed labels of multiple test datasets into a list \cr
+#'     \code{\link{create_sim_samples}}
+#'           \tab Create random samples for simulations \cr
+#'     \code{\link{format_nfold}}
+#'           \tab Create n-fold cross validation dataset from data frame
 #'   }
 #'
 #' @section S3 generics:
@@ -21,16 +27,36 @@
 #'  \code{S3} objects generated by the \code{\link{evalmod}} function.
 #'
 #'   \tabular{lll}{
-#'     \strong{S3 generic} \tab \strong{Library} \tab \strong{Description} \cr
-#'     \code{print}           \tab base      \tab Print the calculation results and the summary of the test data \cr
-#'     \code{\link{as.data.frame}} \tab base \tab Convert a precrec object to a data frame \cr
-#'     \code{\link{plot}}     \tab graphics  \tab Plot performance evaluation measures \cr
-#'     \code{\link{autoplot}} \tab ggplot2   \tab Plot performance evaluation measures with ggplot2  \cr
-#'     \code{\link{fortify}}  \tab ggplot2   \tab Prepare a data frame for ggplot2 \cr
-#'     \code{\link{auc}}      \tab precrec   \tab Make a data frame with AUC scores \cr
-#'     \code{\link{part}}     \tab precrec   \tab Calculate partial curves and partial AUC scores \cr
-#'     \code{\link{pauc}}     \tab precrec   \tab Make a data frame with pAUC scores \cr
-#'     \code{\link{auc_ci}}   \tab precrec   \tab Calculate confidence intervals of AUC scores
+#'     \strong{S3 generic}
+#'     \tab \strong{Library}
+#'     \tab \strong{Description} \cr
+#'     \code{print}
+#'     \tab base
+#'     \tab Print the calculation results and the summary of the test data \cr
+#'     \code{\link{as.data.frame}}
+#'     \tab base
+#'     \tab Convert a precrec object to a data frame \cr
+#'     \code{\link{plot}}
+#'     \tab graphics
+#'     \tab Plot performance evaluation measures \cr
+#'     \code{\link{autoplot}}
+#'     \tab ggplot2
+#'     \tab Plot performance evaluation measures with ggplot2  \cr
+#'     \code{\link{fortify}}
+#'     \tab ggplot2
+#'     \tab Prepare a data frame for ggplot2 \cr
+#'     \code{\link{auc}}
+#'     \tab precrec
+#'     \tab Make a data frame with AUC scores \cr
+#'     \code{\link{part}}
+#'     \tab precrec
+#'     \tab Calculate partial curves and partial AUC scores \cr
+#'     \code{\link{pauc}}
+#'     \tab precrec
+#'     \tab Make a data frame with pAUC scores \cr
+#'     \code{\link{auc_ci}}
+#'     \tab precrec
+#'     \tab Calculate confidence intervals of AUC scores
 #'   }
 #'
 #' @section Performance measure calculations:
@@ -78,8 +104,9 @@
 #' @importFrom ggplot2 fortify
 #' @importFrom grDevices col2rgb rainbow rgb
 #' @importFrom graphics abline layout legend lines
-#' @importFrom matplot par plot plot.new polygon
+#' @importFrom graphics matplot plot plot.new polygon
 #' @importFrom methods is
+#' @importFrom rlang sym
 #' @importFrom stats qnorm rbeta rnorm sd qt
 #' @importFrom data.table frank
 #'
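
Note: the @importFrom fix above corrects a tag in which the package name was missing, so the graphics functions were attributed to "matplot", and it adds an import for rlang::sym(), which the new aes() calls depend on. A sketch of the NAMESPACE directives roxygen2 would regenerate from these tags, which is presumably why NAMESPACE appears in the changed-files list above (hypothetical excerpt):

importFrom(graphics, matplot)
importFrom(graphics, plot)
importFrom(graphics, plot.new)
importFrom(graphics, polygon)
importFrom(rlang, sym)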

@@ -4,7 +4,6 @@
 calc_auc_with_u <- function(sdat, scores = NULL, labels = NULL, na_worst = TRUE,
                             ties_method = "equiv", keep_sdat = FALSE,
                             ustat_method = "frank", ...) {
-
   # === Validate input arguments ===
   # Create sdat from scores and labels if sdat is missing
   sdat <- .create_src_obj(sdat, "sdat", reformat_data, scores, labels,

@@ -150,7 +150,6 @@
 #
 .join_datasets <- function(..., efunc_vtype = NULL, efunc_nrow = NULL,
                            byrow = FALSE, chklen = TRUE) {
-
   # Validate arguments
   .validate_join_datasets_args(...,
     efunc_vtype = efunc_vtype,
@@ -188,9 +187,9 @@
       cdat <- c(cdat, list(ds))
     } else if (is.matrix(ds) || is.data.frame(ds)) {
       if (byrow) {
-        cdat <- c(cdat, lapply(seq(nrow(ds)), function(i) ds[i, ]))
+        cdat <- c(cdat, lapply(seq_len(nrow(ds)), function(i) ds[i, ]))
       } else {
-        cdat <- c(cdat, lapply(seq(ncol(ds)), function(j) ds[, j]))
+        cdat <- c(cdat, lapply(seq_len(ncol(ds)), function(j) ds[, j]))
       }
     } else if (is.array(ds)) {
       if (length(dim(ds)) == 1) {
@@ -241,7 +240,6 @@
 #
 .validate_join_datasets_args <- function(..., efunc_vtype, efunc_nrow, byrow,
                                          chklen) {
-
   # Check ...
   arglist <- list(...)
   if (length(arglist) == 0) {
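
Note: the seq_len() change above follows a common lint/style recommendation: seq_len(n) always means the indices 1..n, while seq() changes behaviour with the type and length of its argument. A small sketch (not from the package):

# Not part of the diff: why seq_len() is the more explicit idiom here.
m <- matrix(1:6, nrow = 3)
seq_len(nrow(m)) # 1 2 3 -- always the indices 1..n

v <- c(10, 20)
seq(v) # 1 2 -- with a vector argument, seq() silently behaves like seq_along()
# seq_len(-1) would throw an error, so bad lengths fail loudly instead of yielding indices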

@@ -501,7 +501,6 @@
 # matplot wrapper
 #
 .matplot_wrapper <- function(obj, type, curvetype, main, xlab, ylab) {
-
   # === Validate input arguments ===
   .validate(obj[[curvetype]])

@@ -5,7 +5,6 @@
                           modname = as.character(NA), dsid = 1L,
                           posclass = NULL, na_worst = TRUE,
                           ties_method = "equiv", mode = "rocprc", ...) {
-
   # === Validate input arguments ===
   new_ties_method <- .pmatch_tiesmethod(ties_method, ...)
   new_na_worst <- .get_new_naworst(na_worst, ...)
@@ -23,11 +22,12 @@

   if (mode == "aucroc") {
     # === Create an S3 object ===
-    s3obj <- structure(list(
-      scores = scores,
-      labels = fmtlabs[["labels"]]
-    ),
-    class = "sdat"
+    s3obj <- structure(
+      list(
+        scores = scores,
+        labels = fmtlabs[["labels"]]
+      ),
+      class = "sdat"
     )
   } else {
     # Get score ranks and sorted indices
@@ -38,13 +38,14 @@
     rank_idx <- sranks[["rank_idx"]]

     # === Create an S3 object ===
-    s3obj <- structure(list(
-      scores = scores,
-      labels = fmtlabs[["labels"]],
-      ranks = ranks,
-      rank_idx = rank_idx
-    ),
-    class = "fmdat"
+    s3obj <- structure(
+      list(
+        scores = scores,
+        labels = fmtlabs[["labels"]],
+        ranks = ranks,
+        rank_idx = rank_idx
+      ),
+      class = "fmdat"
     )
   }
@@ -99,7 +100,6 @@
 #
 .rank_scores <- function(scores, na_worst = TRUE, ties_method = "equiv",
                          validate = TRUE) {
-
   # === Validate input arguments ===
   if (validate) {
     .validate_scores(scores)
@@ -120,7 +120,6 @@
 .validate_reformat_data_args <- function(scores, labels, modname, dsid,
                                          posclass, na_worst, ties_method,
                                          mode, ...) {
-
   # Check '...'
   arglist <- list(...)
   if (!is.null(names(arglist))) {

@@ -2,7 +2,6 @@
 # Calculate basic evaluation measures from confusion matrices
 #
 calc_measures <- function(cmats, scores = NULL, labels = NULL, ...) {
-
   # === Validate input arguments ===
   # Create cmats from scores and labels if cmats is missing
   cmats <- .create_src_obj(

@@ -3,7 +3,6 @@
 #
 .validate_scores_and_labels <- function(obj, obj_name, scores, labels, ...) {
   if (missing(obj) || is.null(obj)) {
-
     # Check if scores and labels are specified
     if (is.null(scores) && !is.null(labels)) {
       stop("Invalid scores", call. = FALSE)
@@ -368,12 +367,13 @@
   if (!is.null(obj) && (attr(obj, "dataset_type") == "multiple")) {
     obj_calc_avg <- attr(obj, "args")[["calc_avg"]]
     if (show_cb && !obj_calc_avg) {
-      stop(paste0(
-        "calc_avg of the evalmod function",
-        " must be set as TRUE before using show_cb",
-        " of this function"
-      ),
-      call. = FALSE
+      stop(
+        paste0(
+          "calc_avg of the evalmod function",
+          " must be set as TRUE before using show_cb",
+          " of this function"
+        ),
+        call. = FALSE
       )
     }
   }
@@ -393,12 +393,13 @@
     obj_calc_avg <- attr(obj, "args")[["calc_avg"]]
     obj_raw_curves <- attr(obj, "args")[["raw_curves"]]
     if (raw_curves && (!obj_calc_avg || !obj_raw_curves)) {
-      stop(paste0(
-        "Both calc_avg and raw_curves of the evalmod function",
-        " must be set as TRUE before using raw_curves",
-        " of this function"
-      ),
-      call. = FALSE
+      stop(
+        paste0(
+          "Both calc_avg and raw_curves of the evalmod function",
+          " must be set as TRUE before using raw_curves",
+          " of this function"
+        ),
+        call. = FALSE
       )
     }
   }

@@ -144,7 +144,6 @@
 # Get AUCs
 #
 .gather_aucs <- function(lcurves, mdat) {
-
   # Collect AUCs of ROC or PRC curves
   ct_len <- 2
   modnames <- attr(mdat, "data_info")[["modnames"]]
@@ -172,7 +171,6 @@
 # Validate curves object generated by .pl_main_rocprc()
 #
 .validate_curves_common <- function(curves, class_name) {
-
   # Need to validate only once
   if (methods::is(curves, class_name) && attr(curves, "validated")) {
     return(curves)

@@ -65,7 +65,6 @@
 #'
 #' @export
 create_sim_samples <- function(n_repeat, np, nn, score_names = "random") {
-
   # === Validate input arguments ===
   choices <- c("random", "poor_er", "good_er", "excel", "perf")
   if (assertthat::see_if(assertthat::is.string(score_names)) &&

@@ -4,7 +4,6 @@
 pl_main <- function(mdat, mode = "rocprc", calc_avg = TRUE, cb_alpha = 0.05,
                     raw_curves = FALSE, x_bins = 1000, interpolate = TRUE,
                     na_worst = TRUE, ties_method = "equiv", validate = TRUE) {
-
   # === Validation ===
   new_mode <- .pmatch_mode(mode)
   if (validate) {
@@ -75,7 +74,6 @@
 #
 .validate_pl_main_args <- function(mdat, mode, calc_avg, cb_alpha, raw_curves,
                                    x_bins, interpolate) {
-
   # Validate mdat
   .validate(mdat)
   if (mode != "aucroc" && !is.null(mdat) && length(mdat) > 0 &&

@@ -196,7 +196,6 @@
                    expd_first = NULL, mode = "rocprc",
                    nfold_df = NULL, score_cols = NULL, lab_col = NULL,
                    fold_col = NULL, ...) {
-
   # === Join datasets ===
   if (!is.null(nfold_df) && !is.null(score_cols) && !is.null(lab_col) &&
     !is.null(fold_col)) {
@@ -417,7 +416,6 @@
 #
 .validate_mmdata_args <- function(lscores, llabels, modnames, dsids, posclass,
                                   na_worst, ties_method, expd_first, mode) {
-
   # Check lscores and llabels
   if (length(llabels) != 1 && length(lscores) != length(llabels)) {
     stop(paste0(

@@ -19,44 +19,48 @@
                             arg_names) {
   # Check class
   if (!methods::is(obj, class_name)) {
-    stop(paste0(
-      "Expected ", class_name, " created by ", func_name, "(): ",
-      class(obj)
-    ),
-    call. = FALSE
+    stop(
+      paste0(
+        "Expected ", class_name, " created by ", func_name, "(): ",
+        class(obj)
+      ),
+      call. = FALSE
     )
   }

   # Check class items
   ditems <- setdiff(item_names, names(obj))
   if (!is.null(item_names) && length(ditems) > 0) {
-    stop(paste0(
-      "Invalid list items in ", class_name, ": ",
-      paste(ditems, collapse = ", ")
-    ),
-    call. = FALSE
+    stop(
+      paste0(
+        "Invalid list items in ", class_name, ": ",
+        paste(ditems, collapse = ", ")
+      ),
+      call. = FALSE
     )
   }

   # Check attributes
   ditems <- setdiff(attr_names, names(attributes(obj)))
   if (!is.null(attr_names) && length(ditems) > 0) {
-    stop(paste0(
-      "Invalid attributes in ", class_name, ": ",
-      paste(ditems, collapse = ", ")
-    ),
-    call. = FALSE
+    stop(
+      paste0(
+        "Invalid attributes in ", class_name, ": ",
+        paste(ditems, collapse = ", ")
+      ),
+      call. = FALSE
     )
   }

   # Check args
   ditems <- setdiff(names(attr(obj, "args")), arg_names)
   if (!is.null(arg_names) && length(ditems) > 0) {
-    stop(paste0(
-      "Invalid args in ", class_name, ": ",
-      paste(ditems, collapse = ", ")
-    ),
-    call. = FALSE
+    stop(
+      paste0(
+        "Invalid args in ", class_name, ": ",
+        paste(ditems, collapse = ", ")
+      ),
+      call. = FALSE
     )
   }
 }

@@ -24,7 +24,6 @@
 #
 .calc_avg_common <- function(obj, mode, class_name, modnames, uniq_modnames,
                              cb_alpha, x_bins) {
-
   # === Validate input arguments ===
   if (is.null(x_bins) || any(is.na(x_bins))) {
     x_bins <- 1

@@ -358,11 +358,12 @@

       modname <- factor(rep(modnames[i], length(x)), levels = uniq_modnames)
       dsid <- factor(rep(dsids[i], length(x)), levels = uniq_dsids)
-      dsid_modname <- factor(rep(
-        paste(modnames[i], dsids[i], sep = ":"),
-        length(x)
-      ),
-      levels = dsid_modnames
+      dsid_modname <- factor(
+        rep(
+          paste(modnames[i], dsids[i], sep = ":"),
+          length(x)
+        ),
+        levels = dsid_modnames
       )
       curvename <- factor(rep(curvetype, length(x)),
         levels = names(curvetype_names)

@@ -309,7 +309,10 @@
 #'
 #' # a function to test mode = "aucroc"
 #' func_evalmod_aucroc <- function(samp) {
-#'   uaucs <- evalmod(scores = samp$scores, labels = samp$labels, mode = "aucroc")
+#'   uaucs <- evalmod(
+#'     scores = samp$scores, labels = samp$labels,
+#'     mode = "aucroc"
+#'   )
 #'   as.data.frame(uaucs)
 #' }
 #'
@@ -327,7 +330,6 @@
                     posclass = NULL, na_worst = TRUE, ties_method = "equiv",
                     calc_avg = TRUE, cb_alpha = 0.05, raw_curves = FALSE,
                     x_bins = 1000, interpolate = TRUE, ...) {
-
   # Validation
   new_mode <- .get_new_mode(mode, mdat, "rocprc")
   new_ties_method <- .pmatch_tiesmethod(ties_method, ...)
@@ -374,11 +376,12 @@
   if (!is.null(mode)) {
     new_mode <- .pmatch_mode(mode)
     if (new_mode != "aucroc" && !is.na(mdat_mode) && mdat_mode == "aucroc") {
-      stop(paste0(
-        "Invalid 'mode': evalmod <- '", new_mode,
-        "'', mmdata <- '", mdat_mode, "'"
-      ),
-      call. = FALSE
+      stop(
+        paste0(
+          "Invalid 'mode': evalmod <- '", new_mode,
+          "'', mmdata <- '", mdat_mode, "'"
+        ),
+        call. = FALSE
       )
     }
   } else if (!is.na(mdat_mode)) {
@@ -399,7 +402,6 @@
                                    posclass, na_worst, ties_method,
                                    calc_avg, cb_alpha, raw_curves,
                                    x_bins, interpolate) {
-
   # Check mode
   .validate_mode(mode)

@@ -30,14 +30,15 @@
       model[["tp"]], model[["fn"]],
       model[["fp"]], model[["tn"]]
     ),
-    group = factor(c(
-      rep("TPs", n), rep("FNs", n),
-      rep("FPs", n), rep("TNs", n)
-    ),
-    levels = c(
-      "TPs", "FNs",
-      "FPs", "TNs"
-    )
+    group = factor(
+      c(
+        rep("TPs", n), rep("FNs", n),
+        rep("FPs", n), rep("TNs", n)
+      ),
+      levels = c(
+        "TPs", "FNs",
+        "FPs", "TNs"
+      )
     )
   )
 }
@@ -64,28 +65,29 @@
       1 - pb[["specificity"]], pb[["precision"]],
       pb[["mcc"]], pb[["fscore"]]
     ),
-    group = factor(c(
-      rep("score", n),
-      rep("label", n),
-      rep("error", n),
-      rep("accuracy", n),
-      rep("specificity", n),
-      rep("sensitivity", n),
-      rep("1 - specificity", n),
-      rep("precision", n),
-      rep("mcc", n),
-      rep("fscore", n)
-    ),
-    levels = c(
-      "score", "label",
-      "error", "accuracy",
-      "specificity",
-      "sensitivity",
-      "1 - specificity",
-      "precision",
-      "mcc",
-      "fscore"
-    )
+    group = factor(
+      c(
+        rep("score", n),
+        rep("label", n),
+        rep("error", n),
+        rep("accuracy", n),
+        rep("specificity", n),
+        rep("sensitivity", n),
+        rep("1 - specificity", n),
+        rep("precision", n),
+        rep("mcc", n),
+        rep("fscore", n)
+      ),
+      levels = c(
+        "score", "label",
+        "error", "accuracy",
+        "specificity",
+        "sensitivity",
+        "1 - specificity",
+        "precision",
+        "mcc",
+        "fscore"
+      )
     )
   )
 }

@@ -3,7 +3,6 @@
 #
 create_confmats <- function(fmdat, scores = NULL, labels = NULL,
                             keep_fmdat = FALSE, ...) {
-
   # === Validate input arguments ===
   # Create fmdat from scores and labels if fmdat is missing
   fmdat <- .create_src_obj(fmdat, "fmdat", reformat_data, scores, labels, ...)
Files Coverage
R 96.71%
src 95.39%
Project Totals (35 files) 96.42%
