Tyarel8 18ce5de500
feat(std): add comparison support to bench command (#15843)
# Description

Taking a cue from [hyperfine](https://github.com/sharkdp/hyperfine), I have
added the ability for the `bench` command to benchmark multiple commands and
then compare the results.

```
→ bench { ls -a | is-empty } { fd | is-empty }
 # |         code         |       mean       |       min       |       max        |     std     | ratio
---+----------------------+------------------+-----------------+------------------+-------------+-------
 0 | { ls -a | is-empty } |  3ms 816µs 562ns | 3ms 670µs 400ns |        4ms 334µs | 146µs 304ns |  1.00
 1 | { fd | is-empty }    | 33ms 325µs 304ns |      31ms 963µs | 36ms 328µs 500ns | 701µs 295ns |  8.73

→ bench -p { ls -a | is-empty } { fd | is-empty }
Benchmark 1: { ls -a | is-empty }
    3ms 757µs 124ns +/- 103µs 165ns
Benchmark 2: { fd | is-empty }
    33ms 403µs 680ns +/- 704µs 904ns

{ ls -a | is-empty } ran
    8.89 times faster than { fd | is-empty }
```

When passing a single closure, the command should behave the same as before,
except that the `--verbose` flag now controls whether the individual round
durations are included in the report, and the progress indicator has moved to
its own `--progress` flag.
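
For reference, a sketch of the single-closure behavior (flag names and output
shapes taken from the command definition below):

```
bench { 1 + 2 }             # record: { mean, min, max, std }
bench { 1 + 2 } --verbose   # same record plus a `times` list with every round
bench { 1 + 2 } --progress  # prints "1 / 50", "2 / 50", ... during the run
bench { 1 + 2 } --pretty    # string: "<mean> +/- <stddev>"
```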

# User-Facing Changes

There are user-facing changes, but I don't think anyone is using the output of
`bench` programmatically, so hopefully this won't break anything.
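
For anyone who does consume it programmatically, this is the kind of pipeline
that would now see the new table shape (column names as in the comparison
output above; the threshold is only an illustration):

```
bench { ls -a | is-empty } { fd | is-empty }
| where ratio > 2          # keep only commands noticeably slower than the fastest
| select code mean ratio
```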

---------

Co-authored-by: Bahex <Bahex@users.noreply.github.com>
2025-05-29 17:53:10 -05:00


# run a piece of `nushell` code multiple times and measure the time of execution.
#
# this command returns a benchmark report; see the examples below for the
# possible output forms.
#
# > **Note**
# > `std bench --pretty` will return a `string`.
@example "measure the performance of simple addition" { bench { 1 + 2 } } --result {
mean: 2308ns,
min: 2000ns,
max: 8500ns,
std: 895ns
}
@example "do 10 runs and show the time of each" { bench { 1 + 2 } -n 10 --verbose } --result {
mean: 3170ns,
min: 2200ns,
max: 9800ns,
std: 2228ns,
times: [
9800ns,
3100ns,
2800ns,
2300ns,
2500ns,
2200ns,
2300ns,
2300ns,
2200ns,
2200ns
]
}
@example "get a pretty benchmark report" { bench { 1 + 2 } --pretty } --result "3µs 125ns +/- 2µs 408ns"
@example "compare multiple commands" { bench { 2 + 4 } { 2 ** 4 } } --result [
[
code,
mean,
min,
max,
std,
ratio
];
[
"{ 2 + 4 }",
2406ns,
2100ns,
9400ns,
1012ns,
1.02732707087959
],
[
"{ 2 ** 4 }",
2342ns,
2100ns,
5300ns,
610ns,
1.0
]
]
@example "compare multiple commands with pretty report" { bench { 2 + 4 } { 2 ** 4 } --pretty } --result "
Benchmark 1: { 2 + 4 }
2µs 494ns +/- 1µs 105ns
Benchmark 2: { 2 ** 4 }
2µs 348ns +/- 565ns
{ 2 + 4 } ran
1 times faster than { 2 ** 4 }"
export def main [
    ...codes: closure # the piece(s) of `nushell` code to measure the performance of
    --rounds (-n): int = 50 # the number of benchmark rounds (hopefully the more rounds the less variance)
    --verbose (-v) # show individual times (has no effect if used with `--pretty`)
    --progress (-P) # prints the progress
    --pretty (-p) # shows the results in human-readable format: "<mean> +/- <stddev>"
]: [
    nothing -> record<mean: duration, std: duration, times: list<duration>>
    nothing -> record<mean: duration, std: duration>
    nothing -> table<code: string, mean: duration, std: duration, ratio: float, times: list<duration>>
    nothing -> table<code: string, mean: duration, std: duration, ratio: float>
    nothing -> string
] {
    let results = (
        $codes | each {|code|
            let times: list<duration> = (
                seq 1 $rounds | each {|i|
                    if $progress { print -n $"($i) / ($rounds)\r" }
                    timeit { do $code }
                }
            )
            if $progress { print $"($rounds) / ($rounds)" }
            {
                mean: ($times | math avg)
                min: ($times | math min)
                max: ($times | math max)
                # `math stddev` works on numbers, so go duration -> int -> float and back
                std: ($times | into int | into float | math stddev | into int | into duration)
            }
            # only attach the individual round times when --verbose is set
            | if $verbose { merge { times: $times }} else {}
        }
    )
    # One benchmark
    if ($results | length) == 1 {
        let report = $results | first
        if $pretty {
            return $"($report.mean) +/- ($report.std)"
        } else {
            return $report
        }
    }
    # Multiple benchmarks
    let min_mean = $results | get mean | math min
    let results = (
        $codes
        | each { view source $in | nu-highlight }
        | wrap code
        | merge $results
        # ratio is relative to the fastest command, which therefore gets a ratio of 1.0
        | insert ratio { $in.mean / $min_mean }
    )
    if $pretty {
        $results
        | enumerate
        | each {|x|
            let report = $x.item
            print $"Benchmark ($x.index + 1): ($report.code)\n\t($report.mean) +/- ($report.std)"
        }
        # hyperfine-style summary: the fastest command first, then each slower one with its ratio
        let results = $results | sort-by ratio
        print $"\n($results.0.code) ran"
        $results
        | skip
        | each {|report|
            print $"\t(ansi green)($report.ratio | math round -p 2)(ansi reset) times faster than ($report.code)"
        }
        ignore
    } else {
        $results
    }
}