Each row of the diff hunks below shows a line number, a second line number or a `-`/`+` change marker, and the source line; `-` rows were removed (numbered against the old file) and `+` rows were added (numbered against the new file). Most paired `-`/`+` lines appear to differ only in trailing whitespace, which is not visible here.

Changes to `bm_parallel()`:

```
 1   1  #' Benchmark in parallel
 2   -  #'
 2   +  #'
 3   3  #' This function runs benchmarks in parallel to test multithreading
 4   4  #' @param bm character name of benchmark function to run from \code{\link{get_available_benchmarks}}
 5   5  #' @param runs number of runs of benchmark to make
 6   6  #' @param verbose display messages during benchmarking
 7   -  #' @param cores number of cores to benchmark. If cores is specified, the benchmark is also
 8   -  #' run for cores = 1 to allow for normalisation.
 7   +  #' @param cores number of cores to benchmark. If cores is specified, the benchmark is also
 8   +  #' run for cores = 1 to allow for normalisation.
 9   9  #' @param ... additional arguments to pass to \code{bm}
10  10  #' @import parallel
11  11  #' @import foreach
12  12  #' @import doParallel
13  13  #' @export
14   -  #' @examples
14   +  #' @examples
15  15  #' \dontrun{
16  16  #' bm_parallel("bm_matrix_cal_manip", runs = 3, verbose = TRUE, cores = 2)
17   -  #' bm = c("bm_matrix_cal_manip","bm_matrix_cal_power", "bm_matrix_cal_sort",
17   +  #' bm = c("bm_matrix_cal_manip","bm_matrix_cal_power", "bm_matrix_cal_sort",
18  18  #' "bm_matrix_cal_cross_product", "bm_matrix_cal_lm")
19   -  #' results = lapply(bm, bm_parallel,
19   +  #' results = lapply(bm, bm_parallel,
20  20  #' runs = 5, verbose = TRUE, cores = 2L)
21  21  #' }
22  22  #' @importFrom foreach foreach %dopar%
23  23  bm_parallel = function(bm, runs, verbose, cores, ...) {
24  24    args = list(...)
25  25    args[["runs"]] = 1
26   -
26   +
27  27    #TODO consider dropping first results from parallel results due to overhead
28   -    results = data.frame(user = NA, system = NA, elapsed = NA, test = NA,
28   +    results = data.frame(user = NA, system = NA, elapsed = NA, test = NA,
29  29                         test_group = NA, cores = NA)
30  30    for (core in cores) {
31  31      cl = parallel::makeCluster(core, outfile = "")
32   -
33   -      parallel::clusterExport(cl, bm) # Export
32   +
33   +      parallel::clusterExport(cl, bm) # Export
34  34      doParallel::registerDoParallel(cl)
35  35      tmp = data.frame(user = numeric(length(runs)), system = 0, elapsed = 0,
36  36                       test = NA, test_group = NA, cores = NA, stringsAsFactors = FALSE)
37  37
38  38      args$runs = 1
39  39      for (j in 1:runs) {
40  40        tmp[j, 1:3] = system.time({
41   -          out = foreach(k = 1:(core)) %dopar%
41   +          out = foreach(k = 1:(core)) %dopar%
42  42            do.call(bm, args, quote = TRUE) #, envir = environment(bm_parallel))
43  43        })[1:3]
44  44      }
48  48      results = rbind(results, tmp)
49  49      parallel::stopCluster(cl)# Would be nice to have on.exit here, but we run out of memory
50  50    }
51   -
51   +
52  52    return(na.omit(results))
53  53  }
```
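For readers unfamiliar with the cluster/foreach pattern used above, here is a minimal self-contained sketch of the same makeCluster / clusterExport / registerDoParallel / `%dopar%` sequence. The benchmark body `bm_dummy` is a placeholder invented for illustration, not a function from the package.

```r
library(foreach)
library(doParallel)

# Placeholder benchmark (illustration only): any function with a `runs` argument would do.
bm_dummy = function(runs = 1) {
  for (i in seq_len(runs)) sum(sort(runif(1e5)))
  invisible(NULL)
}

core = 2
cl = parallel::makeCluster(core)
parallel::clusterExport(cl, "bm_dummy")  # make the benchmark visible to the workers
doParallel::registerDoParallel(cl)

# One timed pass: each of the `core` workers runs the benchmark once, as in bm_parallel()
timing = system.time({
  out = foreach(k = seq_len(core)) %dopar% bm_dummy(runs = 1)
})[1:3]

parallel::stopCluster(cl)
timing  # user, system and elapsed time for the parallel pass
```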
Changes to `rank_results()`:

```
 3   3  benchmarkmeData::is_blas_optimize
 4   4
 5   5  #' Benchmark rankings
 6   -  #'
 6   +  #'
 7   7  #' Comparison with past results.
 8   8  #' @inheritParams upload_results
 9   9  #' @inheritParams benchmark_std
12  12  #' @import dplyr
13  13  #' @export
14  14  rank_results = function(results,
15   -                          blas_optimize = is_blas_optimize(results),
15   +                          blas_optimize = is_blas_optimize(results),
16  16                          verbose = TRUE) {
17   -
18   -
17   +
18   +
19  19    no_of_test_groups = length(unique(results$test_group))
20   -    if (no_of_test_groups != 1)
20   +    if (no_of_test_groups != 1)
21  21      stop("Can only rank a single group at a time", call. = FALSE)
22   -
22   +
23  23    no_of_reps = length(results$test) / length(unique(results$test))
24   -    results_tib = tibble(time = sum(results$elapsed) / no_of_reps,
24   +    results_tib = tibble(time = sum(results$elapsed) / no_of_reps,
25  25                         is_past = FALSE)
26   -
26   +
27  27    if (is.null(blas_optimize)) blas_optimize = c(FALSE, TRUE)
28  28    tmp_env = new.env()
29  29    data(past_results_v2, package = "benchmarkmeData", envir = tmp_env)
30  30    pst = tmp_env$past_results_v2
31  31    pst$test_group = as.character(pst$test_group)
32   -
32   +
33  33    rankings = pst %>%
34  34      filter(test_group == unique(results$test_group)) %>%
35  35      filter(blas_optimize %in% !!blas_optimize) %>%
41  41      arrange(time)
42  42
43  43    ben_rank = which(!rankings$is_past)
44   -
44   +
45  45    if (verbose)
46  46      message("You are ranked ", ben_rank, " out of ", nrow(rankings), " machines.")
47  47    ben_rank
```
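The ranking logic in `rank_results()` reduces to binding the current timing onto the historical results and seeing where it lands once everything is sorted by time. A small hedged sketch of that idea follows; the `past` tibble is made-up data standing in for `benchmarkmeData::past_results_v2`.

```r
library(dplyr)

# Made-up historical timings (seconds), standing in for past_results_v2
past = tibble(time = c(25.0, 12.1, 8.4, 6.3), is_past = TRUE)

# The current machine: total elapsed time averaged over repetitions
current = tibble(time = 9.7, is_past = FALSE)

rankings = bind_rows(past, current) %>%
  arrange(time)                       # fastest machine first

ben_rank = which(!rankings$is_past)   # position of the current machine
message("You are ranked ", ben_rank, " out of ", nrow(rankings), " machines.")
```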
Changes to `get_ram()` and `print.ram()`:

```
32  32  }
33  33
34  34  #' Get the amount of RAM
35   -  #'
36   -  #' Attempt to extract the amount of RAM on the current machine. This is OS
35   +  #'
36   +  #' Attempt to extract the amount of RAM on the current machine. This is OS
37  37  #' specific:
38  38  #' \itemize{
39  39  #' \item Linux: \code{proc/meminfo}
40  40  #' \item Apple: \code{system_profiler -detailLevel mini}
41   -  #' \item Windows: \code{memory.size()}
41   +  #' \item Windows: First tries \code{grep MemTotal /proc/meminfo} then falls back to
42   +  #' \code{wmic MemoryChip get Capacity}
42  43  #' \item Solaris: \code{prtconf}
43  44  #' }
44  45  #' A value of \code{NA} is return if it isn't possible to determine the amount of RAM.
45  46  #' @export
46  47  #' @references The \code{print.bytes} function was taken from the \pkg{pryr} package.
47   -  #' @examples
48   +  #' @examples
48  49  #' ## Return (and pretty print) the amount of RAM
49  50  #' get_ram()
50  51  get_ram = function() {
57  58    } else {
58  59      cleaned_ram = suppressWarnings(try(clean_ram(ram, os), silent = TRUE))
59  60      if (class(cleaned_ram) == "try-error" || length(ram) == 0) {
60   -        message("\t Unable to detect your RAM. # nocov
61   +        message("\t Unable to detect your RAM. # nocov
61  62          Please raise an issue at https://github.com/csgillespie/benchmarkme") # nocov
62  63        ram = structure(NA, class = "ram") #nocov
63  64      } else {
69  70
70  71  #' @rawNamespace S3method(print,ram)
71  72  print.ram = function(x, digits = 3, unit_system = c("metric", "iec"), ...) {
72   -    #unit_system = match.arg(unit_system)
73   -    unit_system = "metric"
73   +    unit_system = match.arg(unit_system)
74   +    #unit_system = "metric"
74  75    base = switch(unit_system, metric = 1000, iec = 1024)
75  76    power = min(floor(log(abs(x), base)), 8)
76  77    if (is.na(x) || power < 1) {
84  85      unit = unit_labels[[power]]
85  86      x = x / (base^power)
86  87    }
87   -
88   -    formatted = format(signif(x, digits = digits), big.mark = ",",
88   +
89   +    formatted = format(signif(x, digits = digits), big.mark = ",",
89  90                       scientific = FALSE, ...)
90  91    cat(unclass(formatted), " ", unit, "\n", sep = "")
91  92    invisible(paste(unclass(formatted), unit))
```
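The switch to `match.arg()` means `print.ram` now honours a `unit_system = "iec"` request instead of always printing metric units. The conversion itself is just a choice of base (1000 or 1024) and a power of that base; below is a standalone sketch of the same idea. The `unit_labels` vector here is an assumption for illustration, not copied from the package.

```r
format_bytes = function(x, digits = 3, unit_system = c("metric", "iec")) {
  unit_system = match.arg(unit_system)   # "metric" unless "iec" is requested explicitly
  base = switch(unit_system, metric = 1000, iec = 1024)
  unit_labels = switch(unit_system,
                       metric = c("kB", "MB", "GB", "TB"),
                       iec    = c("KiB", "MiB", "GiB", "TiB"))
  power = min(floor(log(abs(x), base)), length(unit_labels))
  if (is.na(x) || power < 1) {
    unit = "B"                           # too small (or unknown): leave as bytes
  } else {
    unit = unit_labels[power]
    x = x / (base^power)
  }
  paste(format(signif(x, digits = digits), big.mark = ",", scientific = FALSE), unit)
}

format_bytes(16e9)                       # "16 GB"
format_bytes(16e9, unit_system = "iec")  # roughly "14.9 GiB"
```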
Changes to `benchmark_std()`:

```
 1   1  #' Run standard benchmarks
 2   -  #'
 2   +  #'
 3   3  #' @description This function runs a set of standard benchmarks, which should be suitable for most
 4   4  #' machines. It runs a collection of matrix benchmark functions
 5   5  #' \itemize{
11  11  #' @param runs Number of times to run the test. Default 3.
12  12  #' @param cores Default 0 (serial). When cores > 0, the benchmark is run in parallel.
13  13  #' @param verbose Default TRUE.
14   -  #' @details Setting \code{cores} equal to 1 is useful for assessing the impact of the
15   -  #' parallel computing overhead.
14   +  #' @details Setting \code{cores} equal to 1 is useful for assessing the impact of the
15   +  #' parallel computing overhead.
16  16  #' @export
17   -  #' @examples
17   +  #' @examples
18  18  #' ## Benchmark your system
19  19  #' \dontrun{
20  20  #' res = benchmark_std(3)
21   -  #'
21   +  #'
22  22  #' ## Plot results
23  23  #' plot(res)
24  24  #' }
25  25  benchmark_std = function(runs = 3, verbose = TRUE, cores = 0L) {
26   -    rbind(benchmark_prog(runs, verbose, cores),
26   +    rbind(benchmark_prog(runs, verbose, cores),
27  27          benchmark_matrix_cal(runs, verbose, cores),
28  28          benchmark_matrix_fun(runs, verbose, cores))
29  29  }
```
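As a usage note (hedged, since the calls below take several minutes and are not run here), the standard suite can be run serially and then again with `cores = 1` to gauge the parallel overhead mentioned in the `@details` entry above.

```r
## Not run: each call takes a few minutes
library(benchmarkme)

res_serial   = benchmark_std(runs = 3)              # cores = 0L, purely serial
res_overhead = benchmark_std(runs = 3, cores = 1L)  # single worker: isolates parallel set-up cost

plot(res_serial)   # compare this machine against previously uploaded results
```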
| Files | Coverage |
|---|---|
| R | 49.48% |
| Project Totals (20 files) | 49.48% |