Mirror of https://github.com/nushell/nushell.git (synced 2025-06-09 03:26:58 +02:00)
feat(std): add comparison support to bench command (#15843)
# Description

Like [hyperfine](https://github.com/sharkdp/hyperfine), I have added the option to the `bench` command to benchmark multiple commands and then compare the results.

```
→ bench { ls -a | is-empty } { fd | is-empty }
 # |         code         |       mean       |       min        |       max        |     std     | ratio
---+----------------------+------------------+------------------+------------------+-------------+-------
 0 | { ls -a | is-empty } |  3ms 816µs 562ns | 3ms 670µs 400ns  | 4ms 334µs        | 146µs 304ns |  1.00
 1 | { fd | is-empty }    | 33ms 325µs 304ns | 31ms 963µs       | 36ms 328µs 500ns | 701µs 295ns |  8.73

→ bench -p { ls -a | is-empty } { fd | is-empty }
Benchmark 1: { ls -a | is-empty }
    3ms 757µs 124ns +/- 103µs 165ns
Benchmark 2: { fd | is-empty }
    33ms 403µs 680ns +/- 704µs 904ns

{ ls -a | is-empty } ran
    8.89 times faster than { fd | is-empty }
```

When passing a single closure, the command behaves the same as before, except that the `--verbose` flag now controls whether the duration of every round is printed, and the progress indicator has moved to its own `--progress` flag.

# User-Facing Changes

There are user-facing changes, but I don't think anyone is using the output of `bench` programmatically, so it hopefully won't break anything.

---------

Co-authored-by: Bahex <Bahex@users.noreply.github.com>
parent fbde02370a
commit 18ce5de500
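As a quick illustration of the flags described above, here is a sketch of how the reworked command surface is meant to be used. Flag names match the new signature in the diff below; the trailing `where`/`get` pipeline is only one way the table output could be consumed, and it assumes `std bench` is already in scope.

```nushell
# Single closure: returns a plain record (mean/min/max/std); per-round times are now opt-in.
bench { 1 + 2 }
bench { 1 + 2 } --verbose

# The in-place progress counter has moved to its own flag.
bench { 1 + 2 } -n 100 --progress

# Multiple closures: returns a comparison table, which can also be consumed programmatically,
# e.g. keep only the code whose mean is more than twice that of the fastest variant.
bench { 2 + 4 } { 2 ** 4 } | where ratio > 2 | get code
```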
```diff
@@ -4,52 +4,139 @@
 #
 # > **Note**
 # > `std bench --pretty` will return a `string`.
-@example "measure the performance of simple addition" { bench { 1 + 2 } -n 10 } --result {
-    mean: (4µs + 956ns)
-    std: (4µs + 831ns)
+@example "measure the performance of simple addition" { bench { 1 + 2 } } --result {
+    mean: 2308ns,
+    min: 2000ns,
+    max: 8500ns,
+    std: 895ns
+}
+@example "do 10 runs and show the time of each" { bench { 1 + 2 } -n 10 --verbose } --result {
+    mean: 3170ns,
+    min: 2200ns,
+    max: 9800ns,
+    std: 2228ns,
     times: [
-        (19µs + 402ns)
-        ( 4µs + 322ns)
-        ( 3µs + 352ns)
-        ( 2µs + 966ns)
-        ( 3µs        )
-        ( 3µs +  86ns)
-        ( 3µs +  84ns)
-        ( 3µs + 604ns)
-        ( 3µs +  98ns)
-        ( 3µs + 653ns)
+        9800ns,
+        3100ns,
+        2800ns,
+        2300ns,
+        2500ns,
+        2200ns,
+        2300ns,
+        2300ns,
+        2200ns,
+        2200ns
     ]
 }
 @example "get a pretty benchmark report" { bench { 1 + 2 } --pretty } --result "3µs 125ns +/- 2µs 408ns"
+@example "compare multiple commands" { bench { 2 + 4 } { 2 ** 4 } } --result [
+    [
+        code,
+        mean,
+        min,
+        max,
+        std,
+        ratio
+    ];
+    [
+        "{ 2 + 4 }",
+        2406ns,
+        2100ns,
+        9400ns,
+        1012ns,
+        1.02732707087959
+    ],
+    [
+        "{ 2 ** 4 }",
+        2342ns,
+        2100ns,
+        5300ns,
+        610ns,
+        1.0
+    ]
+]
+@example "compare multiple commands with pretty report" { bench { 2 + 4 } { 2 ** 4 } --pretty } --result "
+Benchmark 1: { 2 + 4 }
+    2µs 494ns +/- 1µs 105ns
+Benchmark 2: { 2 ** 4 }
+    2µs 348ns +/- 565ns
+
+{ 2 + 4 } ran
+    1 times faster than { 2 ** 4 }"
 export def main [
-    code: closure # the piece of `nushell` code to measure the performance of
+    ...codes: closure # the piece(s) of `nushell` code to measure the performance of
     --rounds (-n): int = 50 # the number of benchmark rounds (hopefully the more rounds the less variance)
-    --verbose (-v) # be more verbose (namely prints the progress)
-    --pretty # shows the results in human-readable format: "<mean> +/- <stddev>"
+    --verbose (-v) # show individual times (has no effect if used with `--pretty`)
+    --progress (-P) # prints the progress
+    --pretty (-p) # shows the results in human-readable format: "<mean> +/- <stddev>"
 ]: [
     nothing -> record<mean: duration, std: duration, times: list<duration>>
+    nothing -> record<mean: duration, std: duration>
+    nothing -> table<code: string, mean: duration, std: duration, ratio: float, times: list<duration>>
+    nothing -> table<code: string, mean: duration, std: duration, ratio: float>
     nothing -> string
 ] {
-    let times: list<duration> = (
-        seq 1 $rounds | each {|i|
-            if $verbose { print -n $"($i) / ($rounds)\r" }
-            timeit { do $code }
+    let results = (
+        $codes | each {|code|
+            let times: list<duration> = (
+                seq 1 $rounds | each {|i|
+                    if $progress { print -n $"($i) / ($rounds)\r" }
+                    timeit { do $code }
+                }
+            )
+
+            if $progress { print $"($rounds) / ($rounds)" }
+
+            {
+                mean: ($times | math avg)
+                min: ($times | math min)
+                max: ($times | math max)
+                std: ($times | into int | into float | math stddev | into int | into duration)
+            }
+            | if $verbose { merge { times: $times }} else {}
         }
     )

-    if $verbose { print $"($rounds) / ($rounds)" }
-
-    let report = {
-        mean: ($times | math avg)
-        min: ($times | math min)
-        max: ($times | math max)
-        std: ($times | into int | into float | math stddev | into int | into duration)
-        times: ($times)
+    # One benchmark
+    if ($results | length) == 1 {
+        let report = $results | first
+        if $pretty {
+            return $"($report.mean) +/- ($report.std)"
+        } else {
+            return $report
+        }
     }

+    # Multiple benchmarks
+    let min_mean = $results | get mean | math min
+    let results = (
+        $codes
+        | each { view source $in | nu-highlight }
+        | wrap code
+        | merge $results
+        | insert ratio { $in.mean / $min_mean }
+    )
+
     if $pretty {
-        $"($report.mean) +/- ($report.std)"
+        $results
+        | enumerate
+        | each {|x|
+            let report = $x.item
+            print $"Benchmark ($x.index + 1): ($report.code)\n\t($report.mean) +/- ($report.std)"
+        }
+
+        let results = $results | sort-by ratio
+
+        print $"\n($results.0.code) ran"
+
+        $results
+        | skip
+        | each {|report|
+            print $"\t(ansi green)($report.ratio | math round -p 2)(ansi reset) times faster than ($report.code)"
+        }
+
+        ignore
     } else {
-        $report
+        $results
     }
 }
```
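One note on the `std:` field in the per-run record above: the samples are round-tripped through integer nanoseconds, presumably because `math stddev` operates on numbers rather than durations. A minimal standalone sketch of that conversion (the sample values are made up):

```nushell
# Hypothetical per-round timings (duration values).
let times = [9800ns 3100ns 2800ns 2300ns 2500ns]

# duration -> int (ns) -> float for `math stddev`, then back to an int and a duration.
$times | into int | into float | math stddev | into int | into duration
```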