Check for interrupt before each IR instruction (#16134)
# Description

Fixes #15308. Surprisingly, I can't find any more issues that bring this up.

This PR adds a check for interrupt before evaluating each IR instruction. This makes it possible to ctrl-c out of things that were difficult or impossible to interrupt previously, like:

```nushell
loop {}
```

@devyn also [mentioned previously](https://discord.com/channels/601130461678272522/615329862395101194/1268674828492083327) that this might be a feasible option, but raised some performance concerns.

I did some benchmarking previously, but it turns out tango isn't the greatest for this. I don't have hard numbers anymore, but based on some experimenting I shared in the Discord, it seems like forgoing an experimental option and limiting the check to specific instructions should keep the performance impact minimal.

<details>
<summary>Old benchmarking info</summary>

I did some benchmarks against main. It seems like most benchmarks are between 1% and 6% slower, with some oddball exceptions. The worst case seems to be a giant loop, which makes sense since it's mostly just jumping back to the beginning, meaning we hit the check over and over again. With `bench --pretty -n 100 { for i in 0..1000000 { 1 } }`, it seems like the `ir-interrupt` branch is ~10% slower for this stress case:

```
main:         94ms 323µs 29ns  +/- 1ms 291µs 759ns
ir-interrupt: 103ms 54µs 708ns +/- 1ms 606µs 571ns

(103ms + 54µs + 708ns) / (94ms + 323µs + 29ns) = 1.0925720801438639
```

The performance numbers here aren't great, but they're not terrible either, and I think the usability improvement is probably worth it here.

<details>
<summary>Tango benchmarks</summary>

```
load_standard_lib [ 2.8 ms ... 2.9 ms ] +3.53%*
record_create_1 [ 101.8 us ... 107.0 us ] +5.12%*
record_create_10 [ 132.3 us ... 135.6 us ] +2.48%*
record_create_100 [ 349.7 us ... 354.6 us ] +1.39%*
record_create_1000 [ 3.7 ms ... 3.6 ms ] -1.30%
record_flat_access_1 [ 94.8 us ... 99.9 us ] +5.35%*
record_flat_access_10 [ 96.2 us ... 101.3 us ] +5.33%*
record_flat_access_100 [ 106.0 us ... 111.4 us ] +5.07%*
record_flat_access_1000 [ 203.0 us ... 208.5 us ] +2.69%
record_nested_access_1 [ 95.3 us ... 100.1 us ] +5.03%*
record_nested_access_2 [ 98.4 us ... 104.6 us ] +6.26%*
record_nested_access_4 [ 100.8 us ... 105.4 us ] +4.56%*
record_nested_access_8 [ 104.7 us ... 108.1 us ] +3.23%
record_nested_access_16 [ 110.6 us ... 115.8 us ] +4.71%*
record_nested_access_32 [ 119.0 us ... 124.0 us ] +4.20%*
record_nested_access_64 [ 137.4 us ... 142.2 us ] +3.47%*
record_nested_access_128 [ 176.7 us ... 181.8 us ] +2.87%*
record_insert_1_1 [ 106.2 us ... 111.9 us ] +5.35%*
record_insert_10_1 [ 111.2 us ... 116.0 us ] +4.28%*
record_insert_100_1 [ 134.3 us ... 139.8 us ] +4.06%*
record_insert_1000_1 [ 387.8 us ... 417.1 us ] +7.55%
record_insert_1_10 [ 162.9 us ... 171.5 us ] +5.23%*
record_insert_10_10 [ 159.5 us ... 166.2 us ] +4.23%*
record_insert_100_10 [ 183.3 us ... 190.7 us ] +4.04%*
record_insert_1000_10 [ 411.7 us ... 418.7 us ] +1.71%*
table_create_1 [ 117.2 us ... 120.3 us ] +2.66%
table_create_10 [ 144.9 us ... 152.2 us ] +5.02%*
table_create_100 [ 476.0 us ... 484.8 us ] +1.86%*
table_create_1000 [ 3.7 ms ... 3.7 ms ] +1.55%
table_get_1 [ 121.6 us ... 127.0 us ] +4.40%*
table_get_10 [ 112.1 us ... 117.4 us ] +4.71%*
table_get_100 [ 124.0 us ... 129.5 us ] +4.41%*
table_get_1000 [ 229.9 us ... 245.5 us ] +6.75%*
table_select_1 [ 111.1 us ... 115.6 us ] +4.03%*
table_select_10 [ 112.1 us ... 118.4 us ] +5.65%*
table_select_100 [ 142.2 us ... 147.2 us ] +3.53%*
table_select_1000 [ 383.5 us ... 370.3 us ] -3.43%*
table_insert_row_1_1 [ 122.5 us ... 127.8 us ] +4.35%*
table_insert_row_10_1 [ 123.6 us ... 128.6 us ] +3.99%*
table_insert_row_100_1 [ 124.9 us ... 131.2 us ] +5.05%*
table_insert_row_1000_1 [ 177.3 us ... 182.7 us ] +3.07%*
table_insert_row_1_10 [ 225.7 us ... 234.5 us ] +3.90%*
table_insert_row_10_10 [ 229.2 us ... 235.4 us ] +2.69%*
table_insert_row_100_10 [ 253.3 us ... 257.6 us ] +1.69%
table_insert_row_1000_10 [ 311.1 us ... 318.3 us ] +2.32%*
table_insert_col_1_1 [ 107.6 us ... 113.3 us ] +5.35%*
table_insert_col_10_1 [ 109.6 us ... 115.3 us ] +5.27%*
table_insert_col_100_1 [ 155.5 us ... 159.8 us ] +2.71%*
table_insert_col_1000_1 [ 476.6 us ... 480.8 us ] +0.88%*
table_insert_col_1_10 [ 167.7 us ... 177.4 us ] +5.75%
table_insert_col_10_10 [ 178.5 us ... 194.8 us ] +9.16%*
table_insert_col_100_10 [ 314.4 us ... 322.3 us ] +2.53%*
table_insert_col_1000_10 [ 1.7 ms ... 1.7 ms ] +1.35%*
eval_interleave_100 [ 485.8 us ... 506.5 us ] +4.27%
eval_interleave_1000 [ 3.3 ms ... 3.2 ms ] -1.51%
eval_interleave_10000 [ 31.5 ms ... 31.0 ms ] -1.80%
eval_interleave_with_interrupt_100 [ 473.2 us ... 479.6 us ] +1.35%
eval_interleave_with_interrupt_1000 [ 3.2 ms ... 3.2 ms ] -1.26%
eval_interleave_with_interrupt_10000 [ 32.3 ms ... 31.1 ms ] -3.81%
eval_for_1 [ 124.4 us ... 130.1 us ] +4.60%*
eval_for_10 [ 124.4 us ... 130.3 us ] +4.80%*
eval_for_100 [ 134.5 us ... 141.5 us ] +5.18%*
eval_for_1000 [ 222.7 us ... 244.0 us ] +9.59%*
eval_for_10000 [ 1.0 ms ... 1.2 ms ] +13.86%*
eval_each_1 [ 146.9 us ... 153.0 us ] +4.15%*
eval_each_10 [ 152.3 us ... 158.8 us ] +4.26%*
eval_each_100 [ 169.3 us ... 175.6 us ] +3.76%*
eval_each_1000 [ 346.8 us ... 357.4 us ] +3.06%*
eval_each_10000 [ 2.1 ms ... 2.2 ms ] +2.37%*
eval_par_each_1 [ 194.3 us ... 203.5 us ] +4.73%*
eval_par_each_10 [ 186.4 us ... 193.1 us ] +3.59%*
eval_par_each_100 [ 213.5 us ... 222.2 us ] +4.08%*
eval_par_each_1000 [ 433.4 us ... 437.4 us ] +0.93%
eval_par_each_10000 [ 2.4 ms ... 2.4 ms ] -0.40%
eval_default_config [ 406.3 us ... 414.9 us ] +2.12%*
eval_default_env [ 475.7 us ... 495.1 us ] +4.08%*
encode_json_100_5 [ 132.7 us ... 128.9 us ] -2.88%*
encode_json_10000_15 [ 35.5 ms ... 34.0 ms ] -4.15%*
encode_msgpack_100_5 [ 85.0 us ... 83.9 us ] -1.20%*
encode_msgpack_10000_15 [ 22.2 ms ... 21.7 ms ] -2.17%*
decode_json_100_5 [ 431.3 us ... 421.0 us ] -2.40%*
decode_json_10000_15 [ 119.7 ms ... 117.6 ms ] -1.76%*
decode_msgpack_100_5 [ 160.6 us ... 151.1 us ] -5.90%*
decode_msgpack_10000_15 [ 44.0 ms ... 43.1 ms ] -2.12%*
```

</details>
</details>

# User-Facing Changes

* It should be possible to ctrl-c in situations where it was not previously possible

# Tests + Formatting

N/A

# After Submitting

N/A
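To make the description concrete, here is a minimal, self-contained sketch of the idea: poll a shared interrupt flag before dispatching each instruction in an IR-style evaluation loop. The `Signals`, `Span`, `Instruction`, and `eval_block` names below are simplified stand-ins for illustration, not Nushell's actual types or evaluator.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

/// Simplified stand-in for an interrupt flag shared with a ctrl-c handler.
#[derive(Clone)]
struct Signals {
    interrupt: Arc<AtomicBool>,
}

#[derive(Clone, Copy, Debug)]
struct Span {
    start: usize,
    end: usize,
}

/// Error returned when an interrupt was requested.
#[derive(Debug)]
struct Interrupted {
    span: Span,
}

impl Signals {
    /// Returns an error if an interrupt has been requested since the flag was last reset.
    fn check(&self, span: &Span) -> Result<(), Interrupted> {
        if self.interrupt.load(Ordering::Relaxed) {
            Err(Interrupted { span: *span })
        } else {
            Ok(())
        }
    }
}

/// A toy instruction set; a real IR has many more variants.
enum Instruction {
    LoadLiteral,
    Jump { target: usize },
}

/// Evaluation loop that polls for interrupts before every instruction, so even a
/// tight `loop {}` (which executes little more than a jump) can be cancelled.
fn eval_block(signals: &Signals, block: &[Instruction], span: Span) -> Result<(), Interrupted> {
    let mut pc = 0;
    while let Some(instruction) = block.get(pc) {
        // The per-instruction interrupt check: an atomic load, cheap enough to run
        // on every iteration.
        signals.check(&span)?;
        match instruction {
            Instruction::LoadLiteral => pc += 1,
            Instruction::Jump { target } => pc = *target,
        }
    }
    Ok(())
}
```

The real change goes through Nushell's existing `Signals` machinery; the call sites in the diff hunks below pass the span by reference (`signals.check(&span)`), the same shape as this sketch.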
```diff
@@ -241,7 +241,7 @@ fn insert_in_transaction(
     let tx = table.try_init(&first_val)?;
 
     for stream_value in stream {
-        if let Err(err) = signals.check(span) {
+        if let Err(err) = signals.check(&span) {
             tx.rollback().map_err(|e| ShellError::GenericError {
                 error: "Failed to rollback SQLite transaction".into(),
                 msg: e.to_string(),
@@ -588,7 +588,7 @@ fn prepared_statement_to_nu_list(
     let mut row_values = vec![];
 
     for row_result in row_results {
-        signals.check(call_span)?;
+        signals.check(&call_span)?;
         if let Ok(row_value) = row_result {
             row_values.push(row_value);
         }
@@ -614,7 +614,7 @@ fn prepared_statement_to_nu_list(
     let mut row_values = vec![];
 
     for row_result in row_results {
-        signals.check(call_span)?;
+        signals.check(&call_span)?;
         if let Ok(row_value) = row_result {
             row_values.push(row_value);
         }
@@ -329,7 +329,7 @@ fn glob_to_value(
 ) -> ListStream {
     let map_signals = signals.clone();
     let result = glob_results.filter_map(move |entry| {
-        if let Err(err) = map_signals.check(span) {
+        if let Err(err) = map_signals.check(&span) {
             return Some(Value::error(err, span));
         };
         let file_type = entry.file_type();
@@ -341,7 +341,7 @@ fn ls_for_one_pattern(
 
     let mut paths_peek = paths.peekable();
     let no_matches = paths_peek.peek().is_none();
-    signals.check(call_span)?;
+    signals.check(&call_span)?;
     if no_matches {
         return Err(ShellError::GenericError {
             error: format!("No matches found for {:?}", path.item),
@@ -979,14 +979,14 @@ fn read_dir(
         .read_dir()
         .map_err(|err| IoError::new(err, span, f.clone()))?
         .map(move |d| {
-            signals_clone.check(span)?;
+            signals_clone.check(&span)?;
             d.map(|r| r.path())
                 .map_err(|err| IoError::new(err, span, f.clone()))
                 .map_err(ShellError::from)
         });
     if !use_threads {
         let mut collected = items.collect::<Vec<_>>();
-        signals.check(span)?;
+        signals.check(&span)?;
         collected.sort_by(|a, b| match (a, b) {
             (Ok(a), Ok(b)) => a.cmp(b),
             (Ok(_), Err(_)) => Ordering::Greater,
@@ -454,7 +454,7 @@ fn rm(
     });
 
     for result in iter {
-        engine_state.signals().check(call.head)?;
+        engine_state.signals().check(&call.head)?;
         match result {
             Ok(None) => {}
             Ok(Some(msg)) => eprintln!("{msg}"),
@@ -543,7 +543,7 @@ fn stream_to_file(
         let mut reader = BufReader::new(source);
 
         let res = loop {
-            if let Err(err) = signals.check(span) {
+            if let Err(err) = signals.check(&span) {
                 bar.abandoned_msg("# Cancelled #".to_owned());
                 return Err(err);
             }
@@ -100,7 +100,7 @@ impl Command for Last {
                 let mut buf = VecDeque::new();
 
                 for row in iterator {
-                    engine_state.signals().check(head)?;
+                    engine_state.signals().check(&head)?;
                     if buf.len() == rows {
                         buf.pop_front();
                     }
@@ -119,7 +119,7 @@ impl Command for Reduce {
         let mut closure = ClosureEval::new(engine_state, stack, closure);
 
         for value in iter {
-            engine_state.signals().check(head)?;
+            engine_state.signals().check(&head)?;
             acc = closure
                 .add_arg(value)
                 .add_arg(acc.clone())
@@ -31,7 +31,7 @@ pub fn boolean_fold(
     let mut closure = ClosureEval::new(engine_state, stack, closure);
 
     for value in input {
-        engine_state.signals().check(head)?;
+        engine_state.signals().check(&head)?;
         let pred = closure.run_with_value(value)?.into_value(head)?.is_true();
 
         if pred == accumulator {
@@ -468,7 +468,7 @@ fn send_cancellable_request(
 
     // ...and poll the channel for responses
     loop {
-        signals.check(span)?;
+        signals.check(&span)?;
 
         // 100ms wait time chosen arbitrarily
         match rx.recv_timeout(Duration::from_millis(100)) {
@@ -526,7 +526,7 @@ fn send_cancellable_request_bytes(
 
     // ...and poll the channel for responses
     loop {
-        signals.check(span)?;
+        signals.check(&span)?;
 
         // 100ms wait time chosen arbitrarily
         match rx.recv_timeout(Duration::from_millis(100)) {
@@ -115,7 +115,7 @@ impl DirInfo {
         match std::fs::read_dir(&s.path) {
             Ok(d) => {
                 for f in d {
-                    signals.check(span)?;
+                    signals.check(&span)?;
 
                     match f {
                         Ok(i) => match i.file_type() {
@@ -56,7 +56,7 @@ impl Command for Sleep {
                 break;
             }
             thread::sleep(CTRL_C_CHECK_INTERVAL.min(time_until_deadline));
-            engine_state.signals().check(call.head)?;
+            engine_state.signals().check(&call.head)?;
         }
 
         Ok(Value::nothing(call.head).into_pipeline_data())
@@ -431,7 +431,7 @@ fn expand_glob(
     let mut result: Vec<OsString> = vec![];
 
     for m in matches {
-        signals.check(span)?;
+        signals.check(&span)?;
         if let Ok(arg) = m {
            let arg = resolve_globbed_path_to_cwd_relative(arg, prefix.as_ref(), cwd);
            result.push(arg.into());
```