# Description

This PR is kind of two PRs in one because they were dependent on each other.

PR1 - 3de58d4dc2 with update 7fcdb242d9

- This follows our mantra of having everything with defaults written in nushell rust code, so that if you run without a config, you get the same behavior as with the default config/env files. This sets NU_LIB_DIRS to $nu.config-path/scripts and sets NU_PLUGIN_DIRS to $nu.config-path/plugins.

PR2 - 0e8ac876fd

- The benchmarks have been broken for some time and we didn't notice. This PR fixes that. It's dependent on PR1 because the benchmarks were throwing errors: PWD needed to be set to a valid folder and `$nu` did not exist with how the benchmark was set up. I've tested the benchmarks and they now run without error, and I've also launched nushell as `nu -n --no-std-lib` and confirmed the env vars exist.

closes #11236

# User-Facing Changes
<!-- List of all changes that impact the user experience here. This helps us keep track of breaking changes. -->

# Tests + Formatting
<!-- Don't forget to add tests that cover your changes.

Make sure you've run and fixed any issues with these commands:

- `cargo fmt --all -- --check` to check standard code formatting (`cargo fmt --all` applies these changes)
- `cargo clippy --workspace -- -D warnings -D clippy::unwrap_used` to check that you're using the standard code style
- `cargo test --workspace` to check that all tests pass (on Windows make sure to [enable developer mode](https://learn.microsoft.com/en-us/windows/apps/get-started/developer-mode-features-and-debugging))
- `cargo run -- -c "use std testing; testing run-tests --path crates/nu-std"` to run the tests for the standard library

> **Note**
> from `nushell` you can also use the `toolkit` as follows
> ```bash
> use toolkit.nu # or use an `env_change` hook to activate it automatically
> toolkit check pr
> ```
-->

# After Submitting
<!-- If your PR had any user-facing changes, update [the documentation](https://github.com/nushell/nushell.github.io) after the PR is merged, if necessary. This will help us keep the docs up to date. -->
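As a rough illustration only (an assumption, not the PR's actual diff), seeding those two env vars could look like the sketch below. It reuses the `add_env_var` API that the benchmark file further down uses for `PWD`; the `seed_default_dirs` helper name and the reliance on `nu_path::config_dir()` are hypothetical.

```rust
use nu_protocol::{engine::EngineState, Span, Value};

// Hypothetical helper, not code from this PR: seed NU_LIB_DIRS and
// NU_PLUGIN_DIRS with defaults under the nushell config directory.
fn seed_default_dirs(engine_state: &mut EngineState) {
    // nu_path::config_dir() resolves the platform config root; nushell's
    // config files (and thus $nu.config-path) live in its "nushell" subdir.
    let nu_config_dir = nu_path::config_dir()
        .map(|d| d.join("nushell"))
        .unwrap_or_default();

    for (var, subdir) in [("NU_LIB_DIRS", "scripts"), ("NU_PLUGIN_DIRS", "plugins")] {
        let dir = nu_config_dir.join(subdir);
        engine_state.add_env_var(
            var.into(),
            // Both variables hold a list of directories in nushell.
            Value::list(
                vec![Value::string(dir.to_string_lossy(), Span::unknown())],
                Span::unknown(),
            ),
        );
    }
}
```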
195 lines | 6.4 KiB | Rust
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use nu_cli::eval_source;
use nu_parser::parse;
use nu_plugin::{EncodingType, PluginResponse};
use nu_protocol::{engine::EngineState, PipelineData, Span, Value};
use nu_utils::{get_default_config, get_default_env};
use std::path::{Path, PathBuf};

fn load_bench_commands() -> EngineState {
    nu_command::add_shell_command_context(nu_cmd_lang::create_default_context())
}

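/// Canonicalize `path` against the engine state's current working directory,
/// returning the path unchanged if it does not exist or cannot be resolved.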
fn canonicalize_path(engine_state: &EngineState, path: &Path) -> PathBuf {
    let cwd = engine_state.current_work_dir();

    if path.exists() {
        match nu_path::canonicalize_with(path, cwd) {
            Ok(canon_path) => canon_path,
            Err(_) => path.to_owned(),
        }
    } else {
        path.to_owned()
    }
}

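/// Find the user's (canonicalized) home directory, or an empty path if no
/// home directory can be determined.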
fn get_home_path(engine_state: &EngineState) -> PathBuf {
    if let Some(path) = nu_path::home_dir() {
        canonicalize_path(engine_state, &path)
    } else {
        PathBuf::new()
    }
}

// FIXME: All benchmarks live in this one file to speed up build times when
// benchmarking. When the *_benchmarks functions were in different files,
// `cargo bench` would build an executable for every single one - incredibly
// slowly. Would be nice to figure out a way to split things up again.

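/// Benchmark parsing (and evaluating) the default env/config files that ship
/// embedded in the nushell binary (see `get_default_env`/`get_default_config`).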
fn parser_benchmarks(c: &mut Criterion) {
    let mut engine_state = load_bench_commands();
    let home_path = get_home_path(&engine_state);

    // parsing config.nu breaks without PWD set, so set a valid path
    engine_state.add_env_var(
        "PWD".into(),
        Value::string(home_path.to_string_lossy(), Span::test_data()),
    );

    let default_env = get_default_env().as_bytes();
    c.bench_function("parse_default_env_file", |b| {
        b.iter_batched(
            || nu_protocol::engine::StateWorkingSet::new(&engine_state),
            |mut working_set| parse(&mut working_set, None, default_env, false),
            BatchSize::SmallInput,
        )
    });

    let default_config = get_default_config().as_bytes();
    c.bench_function("parse_default_config_file", |b| {
        b.iter_batched(
            || nu_protocol::engine::StateWorkingSet::new(&engine_state),
            |mut working_set| parse(&mut working_set, None, default_config, false),
            BatchSize::SmallInput,
        )
    });

    c.bench_function("eval default_env.nu", |b| {
        b.iter(|| {
            let mut stack = nu_protocol::engine::Stack::new();
            eval_source(
                &mut engine_state,
                &mut stack,
                get_default_env().as_bytes(),
                "default_env.nu",
                PipelineData::empty(),
                false,
            )
        })
    });

    c.bench_function("eval default_config.nu", |b| {
        b.iter(|| {
            let mut stack = nu_protocol::engine::Stack::new();
            eval_source(
                &mut engine_state,
                &mut stack,
                get_default_config().as_bytes(),
                "default_config.nu",
                PipelineData::empty(),
                false,
            )
        })
    });
}

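/// Benchmark full evaluation of the default env/config files via `eval_source`.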
fn eval_benchmarks(c: &mut Criterion) {
    let mut engine_state = load_bench_commands();
    let home_path = get_home_path(&engine_state);

    // parsing config.nu breaks without PWD set, so set a valid path
    engine_state.add_env_var(
        "PWD".into(),
        Value::string(home_path.to_string_lossy(), Span::test_data()),
    );

    c.bench_function("eval default_env.nu", |b| {
        b.iter(|| {
            let mut stack = nu_protocol::engine::Stack::new();
            eval_source(
                &mut engine_state,
                &mut stack,
                get_default_env().as_bytes(),
                "default_env.nu",
                PipelineData::empty(),
                false,
            )
        })
    });

    c.bench_function("eval default_config.nu", |b| {
        b.iter(|| {
            let mut stack = nu_protocol::engine::Stack::new();
            eval_source(
                &mut engine_state,
                &mut stack,
                get_default_config().as_bytes(),
                "default_config.nu",
                PipelineData::empty(),
                false,
            )
        })
    });
}

// generate new table data with `row_cnt` rows and `col_cnt` columns
fn encoding_test_data(row_cnt: usize, col_cnt: usize) -> Value {
    let record = Value::test_record(
        (0..col_cnt)
            .map(|x| (format!("col_{x}"), Value::test_int(x as i64)))
            .collect(),
    );

    Value::list(vec![record; row_cnt], Span::test_data())
}

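/// Benchmark encoding plugin responses of various table sizes as json and msgpack.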
fn encoding_benchmarks(c: &mut Criterion) {
    let mut group = c.benchmark_group("Encoding");
    let test_cnt_pairs = [(100, 5), (100, 15), (10000, 5), (10000, 15)];
    for (row_cnt, col_cnt) in test_cnt_pairs.into_iter() {
        for fmt in ["json", "msgpack"] {
            group.bench_function(&format!("{fmt} encode {row_cnt} * {col_cnt}"), |b| {
                let mut res = vec![];
                let test_data =
                    PluginResponse::Value(Box::new(encoding_test_data(row_cnt, col_cnt)));
                let encoder = EncodingType::try_from_bytes(fmt.as_bytes()).unwrap();
                b.iter(|| encoder.encode_response(&test_data, &mut res))
            });
        }
    }
    group.finish();
}

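/// Benchmark decoding: the response is encoded once outside the timing loop,
/// then repeatedly decoded from a rewound in-memory cursor.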
fn decoding_benchmarks(c: &mut Criterion) {
    let mut group = c.benchmark_group("Decoding");
    let test_cnt_pairs = [(100, 5), (100, 15), (10000, 5), (10000, 15)];
    for (row_cnt, col_cnt) in test_cnt_pairs.into_iter() {
        for fmt in ["json", "msgpack"] {
            group.bench_function(&format!("{fmt} decode for {row_cnt} * {col_cnt}"), |b| {
                let mut res = vec![];
                let test_data =
                    PluginResponse::Value(Box::new(encoding_test_data(row_cnt, col_cnt)));
                let encoder = EncodingType::try_from_bytes(fmt.as_bytes()).unwrap();
                encoder.encode_response(&test_data, &mut res).unwrap();
                let mut binary_data = std::io::Cursor::new(res);
                b.iter(|| {
                    binary_data.set_position(0);
                    encoder.decode_response(&mut binary_data)
                })
            });
        }
    }
    group.finish();
}

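// All benchmark functions are registered in a single group (see the FIXME
// above about per-file build times).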
criterion_group!(
    benches,
    parser_benchmarks,
    eval_benchmarks,
    encoding_benchmarks,
    decoding_benchmarks
);
criterion_main!(benches);
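With the PWD fix in place, the suite runs the usual Criterion way (standard cargo invocations, nothing specific to this PR):

```bash
# run every benchmark group registered in criterion_main!
cargo bench

# or filter to a single benchmark by (partial) name
cargo bench -- parse_default_config_file
```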