forked from extern/nushell
Implement passthrough for benchmark (#2580)
* Implement passthrough for benchmark

  Add a new option, -p, --passthrough, to benchmark. With this option, the benchmark command prints its results to stdout and passes the block's output to the next command in the pipeline.

* Add execution block for benchmark -p

  `benchmark --passthrough` now takes a block where the benchmark output is sent. Example: `benchmark -p {save bench.toml} {ls}`
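A short usage sketch: the first two commands come from the examples added in this diff, the third from the commit message above; the exact timing columns depend on whether the rich-benchmark feature is enabled.

# current behavior: print the timing row
benchmark { sleep 500ms }

# pass the block's output (45) on down the pipeline; the empty passthrough
# block gets an implicit autoview, so the timing row is still displayed
echo 45 | benchmark { sleep 500ms } --passthrough {}

# send the timing row into a block instead, e.g. save it to a file
benchmark -p {save bench.toml} {ls}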
parent 9b577b8679
commit 09429d08aa
@@ -4,7 +4,10 @@ use crate::prelude::*;
 #[cfg(feature = "rich-benchmark")]
 use heim::cpu::time;
 use nu_errors::ShellError;
-use nu_protocol::{hir::Block, Dictionary, Signature, SyntaxShape, UntaggedValue, Value};
+use nu_protocol::{
+    hir::{Block, ClassifiedCommand, Commands, InternalCommand},
+    Dictionary, Scope, Signature, SyntaxShape, UntaggedValue, Value,
+};
 use std::convert::TryInto;
 use std::time::{Duration, Instant};

@@ -13,6 +16,7 @@ pub struct Benchmark;
 #[derive(Deserialize, Debug)]
 struct BenchmarkArgs {
     block: Block,
+    passthrough: Option<Block>,
 }

 #[async_trait]
@@ -22,11 +26,18 @@ impl WholeStreamCommand for Benchmark {
     }

     fn signature(&self) -> Signature {
-        Signature::build("benchmark").required(
+        Signature::build("benchmark")
+            .required(
                 "block",
                 SyntaxShape::Block,
                 "the block to run and benchmark",
             )
+            .named(
+                "passthrough",
+                SyntaxShape::Block,
+                "Display the benchmark results and pass through the block's output",
+                Some('p'),
+            )
     }

     fn usage(&self) -> &str {
@@ -42,15 +53,21 @@ impl WholeStreamCommand for Benchmark {
     }

     fn examples(&self) -> Vec<Example> {
-        vec![Example {
+        vec![
+            Example {
                 description: "Benchmarks a command within a block",
                 example: "benchmark { sleep 500ms }",
                 result: None,
-        }]
+            },
+            Example {
+                description: "Benchmarks a command within a block and passes its output through",
+                example: "echo 45 | benchmark { sleep 500ms } --passthrough {}",
+                result: Some(vec![UntaggedValue::int(45).into()]),
+            },
+        ]
     }
 }

-#[cfg(feature = "rich-benchmark")]
 async fn benchmark(
     raw_args: CommandArgs,
     registry: &CommandRegistry,
@@ -60,10 +77,11 @@ async fn benchmark(
     let tag = raw_args.call_info.args.span;
     let mut context = EvaluationContext::from_raw(&raw_args, &registry);
     let scope = raw_args.call_info.scope.clone();
-    let (BenchmarkArgs { block }, input) = raw_args.process(&registry).await?;
+    let (BenchmarkArgs { block, passthrough }, input) = raw_args.process(&registry).await?;

     let start_time = Instant::now();

+    #[cfg(feature = "rich-benchmark")]
     let start = time().await;

     let result = run_block(
@@ -75,30 +93,41 @@ async fn benchmark(
         &scope.env,
     )
     .await;
-    let _ = result?.drain_vec().await;
+    let output = result?.into_vec().await;

+    #[cfg(feature = "rich-benchmark")]
     let end = time().await;

     let end_time = Instant::now();
+    context.clear_errors();

+    // return basic runtime
+    #[cfg(not(feature = "rich-benchmark"))]
+    {
+        let mut indexmap = IndexMap::with_capacity(1);
+
+        let real_time = into_big_int(end_time - start_time);
+        indexmap.insert("real time".to_string(), real_time);
+        benchmark_output(indexmap, output, passthrough, &tag, &mut context, &scope).await
+    }
     // return advanced stats
+    #[cfg(feature = "rich-benchmark")]
     if let (Ok(start), Ok(end)) = (start, end) {
         let mut indexmap = IndexMap::with_capacity(4);

-        let real_time = into_value(end_time - start_time, &tag);
+        let real_time = into_big_int(end_time - start_time);
         indexmap.insert("real time".to_string(), real_time);

-        let user_time = into_value(end.user() - start.user(), &tag);
+        let user_time = into_big_int(end.user() - start.user());
         indexmap.insert("user time".to_string(), user_time);

-        let system_time = into_value(end.system() - start.system(), &tag);
+        let system_time = into_big_int(end.system() - start.system());
         indexmap.insert("system time".to_string(), system_time);

-        let idle_time = into_value(end.idle() - start.idle(), &tag);
+        let idle_time = into_big_int(end.idle() - start.idle());
         indexmap.insert("idle time".to_string(), idle_time);

-        let value = UntaggedValue::Row(Dictionary::from(indexmap)).into_value(&tag);
-        Ok(OutputStream::one(value))
+        benchmark_output(indexmap, output, passthrough, &tag, &mut context, &scope).await
     } else {
         Err(ShellError::untagged_runtime_error(
             "Could not retreive CPU time",
@@ -106,49 +135,68 @@ async fn benchmark(
     }
 }

-#[cfg(not(feature = "rich-benchmark"))]
-async fn benchmark(
-    raw_args: CommandArgs,
-    registry: &CommandRegistry,
-) -> Result<OutputStream, ShellError> {
-    let registry = registry.clone();
+async fn benchmark_output<T, Output>(
+    indexmap: IndexMap<String, BigInt>,
+    block_output: Output,
+    passthrough: Option<Block>,
+    tag: T,
+    context: &mut EvaluationContext,
+    scope: &Scope,
+) -> Result<OutputStream, ShellError>
+where
+    T: Into<Tag> + Copy,
+    Output: Into<OutputStream>,
+{
+    let value = UntaggedValue::Row(Dictionary::from(
+        indexmap
+            .into_iter()
+            .map(|(k, v)| (k, UntaggedValue::duration(v).into_value(tag)))
+            .collect::<IndexMap<String, Value>>(),
+    ))
+    .into_value(tag);

-    let tag = raw_args.call_info.args.span;
-    let mut context = EvaluationContext::from_raw(&raw_args, &registry);
-    let scope = raw_args.call_info.scope.clone();
-    let (BenchmarkArgs { block }, input) = raw_args.process(&registry).await?;
+    if let Some(time_block) = passthrough {
+        let benchmark_output = InputStream::one(value);

-    let start_time = Instant::now();
+        // add autoview for an empty block
+        let time_block = add_implicit_autoview(time_block);

-    let result = run_block(
-        &block,
-        &mut context,
-        input,
+        let _ = run_block(
+            &time_block,
+            context,
+            benchmark_output,
             &scope.it,
             &scope.vars,
             &scope.env,
         )
-    .await;
-    let _ = result?.drain_vec().await;
-
-    let end_time = Instant::now();
+        .await?;
         context.clear_errors();

-    let mut indexmap = IndexMap::with_capacity(4);
-
-    let real_time = into_value(end_time - start_time, &tag);
-    indexmap.insert("real time".to_string(), real_time);
-
-    let value = UntaggedValue::Row(Dictionary::from(indexmap)).into_value(&tag);
-    Ok(OutputStream::one(value))
+        Ok(block_output.into())
+    } else {
+        let benchmark_output = OutputStream::one(value);
+        Ok(benchmark_output)
+    }
 }

-fn into_value<T: TryInto<Duration>>(time: T, tag: &Span) -> Value {
-    UntaggedValue::duration(
+fn add_implicit_autoview(mut block: Block) -> Block {
+    if block.block.is_empty() {
+        block.push({
+            let mut commands = Commands::new(block.span);
+            commands.push(ClassifiedCommand::Internal(InternalCommand::new(
+                "autoview".to_string(),
+                block.span,
+                block.span,
+            )));
+            commands
+        });
+    }
+    block
+}
+
+fn into_big_int<T: TryInto<Duration>>(time: T) -> BigInt {
     time.try_into()
         .unwrap_or_else(|_| Duration::new(0, 0))
         .as_nanos()
-        .into(),
-    )
-    .into_value(tag)
+        .into()
 }