open Bechamel

(* From the function we want to measure, we make an indexed (by [args])
   test: a list of tests, one per element of [args], as if we had written:

   {[
     let test =
       [ push 0
       ; push 100
       ; push 500
       ; push 1000
       ; push 10000 ]
   ]}

   where [push n () = Belt.Array.push static_array n]. *)
let static_array = [| 33 |]

let test =
  Test.make_indexed ~name:"Belt.Array.push" ~fmt:"%s %d"
    ~args:[ 0; 100; 500; 1000; 10000 ] (fun n ->
      Staged.stage @@ fun () -> Belt.Array.push static_array n)
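
(* For comparison, a single (non-indexed) test takes a staged thunk
   directly. A minimal sketch; the name [_single_test] is ours and not part
   of the original example: *)
let _single_test =
  Test.make ~name:"Belt.Array.push once"
    (Staged.stage @@ fun () -> Belt.Array.push static_array 0)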

(* From our test, we can start to benchmark it!

   A benchmark consists of multiple /runs/ of your test. From the results
   given by [Benchmark.all], an analysis is needed to infer the cost of a
   single call of your test.

   [Bechamel] asks for 3 things:
   - what you want to record (see [instances])
   - how you want to analyse it (see [ols])
   - how you want to run the benchmark (see [cfg])

   The core of [Bechamel] (see [Bechamel.Toolkit]) provides several
   measures, such as the [monotonic-clock] to observe time performance.

   The analysis can be OLS (Ordinary Least Squares) or RANSAC. In this
   example, we use only the former.

   Finally, to launch the benchmark, we need a few other details, such as:
   - should we stabilise the GC?
   - how many /runs/ we want
   - the maximum time allowed for the benchmark
   - etc.

   [raw_results] is what the benchmark produced. [results] is what the
   analysis inferred. The former is used to draw graphs or to let the user
   (with [Measurement_raw]) infer something other than what [ols] did. The
   latter is usually what you want: a synthesis of the /samples/. *)

let benchmark () =
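  (* OLS regression of each measure against the number of runs;
     [~bootstrap:0] disables bootstrap resampling. *)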
  let ols =
    Analyze.ols ~bootstrap:0 ~r_square:true ~predictors:Measure.[| run |]
  in
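  (* What we record on each run: GC allocation counters and the monotonic
     clock. *)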
  let instances =
    Toolkit.Instance.[ minor_allocated; major_allocated; monotonic_clock ]
  in
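  (* How the benchmark runs: at most 2000 samples within a budget of 0.5s
     per test, keeping 1000 extra samples for a kernel density estimate. *)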
  let cfg =
    Benchmark.cfg ~limit:2000 ~quota:(Time.second 0.5) ~kde:(Some 1000) ()
  in
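  (* Run every test, then analyse each instance and merge the per-instance
     results into a single table. *)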
  let raw_results = Benchmark.all cfg instances test in
  let results =
    List.map (fun instance -> Analyze.all ols instance raw_results) instances
  in
  let results = Analyze.merge ols instances results in
  (results, raw_results)

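(* Register the unit of each recorded measure so that [Bechamel_notty] can
   label its columns. *)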
let () =
  List.iter
    (fun v -> Bechamel_notty.Unit.add v (Measure.unit v))
    Toolkit.Instance.[ minor_allocated; major_allocated; monotonic_clock ]

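(* Render the OLS results as a [Notty] image fitted to the given window. *)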
let img (window, results) =
  Bechamel_notty.Multiple.image_of_ols_results ~rect:window
    ~predictor:Measure.run results

open Notty_unix

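(* Query the terminal size (falling back to 80x1 when [stdout] is not a
   tty), run the benchmark, and print the results. *)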
let () =
  let window =
    match winsize Unix.stdout with
    | Some (w, h) -> { Bechamel_notty.w; h }
    | None -> { Bechamel_notty.w = 80; h = 1 }
  in
  let results, _ = benchmark () in
  img (window, results) |> eol |> output_image