module Bench: Bench
A simple example of using bench:

open Core.Std
module Bench = Core_extended.Bench

let main () =
  (* bench takes a list of tests; see [val bench] below *)
  Bench.bench [Bench.Test.create ~name:"test" (fun () -> ignore (Time.now ()))]
;;

let () = main ()
module Test : sig .. end
module Result : sig .. end
gc_prefs: can be used to set custom GC settings during benchmarking (they will be reverted when the function returns).
no_compactions (default false): disables compactions during benchmarking; reverted when the function returns. Takes precedence over gc_prefs.
fast (default false): run fewer tests, and thus get less accurate results, in less time.
clock (default Wall): controls how time is measured. Wall includes time spent waiting on I/O and while the process is suspended or descheduled; Cpu counts only time spent on computation (see the sketch after this list).
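For illustration, here is a minimal sketch of passing these flags to Bench.bench; the flag names and defaults are the ones listed above, and the test body is an arbitrary placeholder:

open Core.Std
module Bench = Core_extended.Bench

(* Quick, less accurate run measuring CPU time only, with GC compactions
   disabled for the duration of the benchmark. *)
let () =
  Bench.bench
    ~fast:true
    ~clock:`Cpu
    ~no_compactions:true
    [ Bench.Test.create ~name:"time-now" (fun () -> ignore (Time.now ())) ]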
type 'a with_benchmark_flags =
  ?verbosity:[ `High | `Low | `Mid ] ->
  ?gc_prefs:Core.Gc.Control.t ->
  ?no_compactions:bool -> ?fast:bool -> ?clock:[ `Cpu | `Wall ] -> 'a
type 'a with_print_flags =
  ?time_format:[ `Auto | `Ms | `Ns | `S | `Us ] -> ?limit_width_to:int -> 'a
The "Name" and "Input size" columns of the printed table reflect the values passed to
Test.create. The "Normalized" column is run_time / input_size
. "Stdev" reports the
standard deviation for the "Run time" column. "Allocated" reports the average number
of allocated words through the benchmarks.
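As a hedged illustration of how those columns get their values, the sketch below assumes Test.create accepts an optional ~size argument for the input size (implied by the table description above, but its exact signature is not shown here), and assumes the same open Core.Std and Bench alias as in the example at the top of this page:

(* "Name" will read "list-sort" and "Input size" 10_000 (assumed ~size
   argument); "Normalized" would then be run_time / 10_000. *)
let sort_test =
  let input = List.init 10_000 ~f:(fun i -> 10_000 - i) in
  Bench.Test.create ~name:"list-sort" ~size:10_000
    (fun () -> ignore (List.sort ~cmp:Int.compare input))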
"Warnings" may contain single characters indicating various things:
'm' indicates the minimum run time was less than 80% of the mean
'M' indicates the maximum run time was more than 120% of the mean
'c' indicates GC compactions occurred during testing
'a' indicates the number of words allocated was not the same in all tests
val bench : (Test.t list -> unit) with_benchmark_flags with_print_flags
val bench_raw : (Test.t list -> Result.t list) with_benchmark_flags
bench_raw returns a list documenting the runtimes rather than printing to stdout. These results can be fed to print for output identical to calling bench.
val print : (Result.t list -> unit) with_print_flags
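A minimal sketch of that raw workflow, combining bench_raw and print as declared above (the ~time_format and ~limit_width_to values are arbitrary illustrative choices, and the same open Core.Std and Bench alias as in the top example is assumed):

(* Collect results without printing, then render them later; per the note
   above, this should match the table produced by calling Bench.bench. *)
let () =
  let results =
    Bench.bench_raw
      [ Bench.Test.create ~name:"time-now" (fun () -> ignore (Time.now ())) ]
  in
  Bench.print ~time_format:`Ms ~limit_width_to:72 results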