/// Run a command in nu and get its output
///
/// The `nu!` macro accepts a number of options, like the `cwd` in which the
/// command should be run. It is also possible to specify a different `locale`
/// to test locale-dependent commands.
///
/// Pass options as the first arguments in the form of `key_1: value_1, key_2:
/// value_2, ...`. The options are defined in the `NuOpts` struct inside the
/// `nu!` macro.
///
/// The command can be formatted using `{}` just like `println!` or `format!`.
/// Pass the format arguments comma-separated after the command itself.
///
/// # Examples
///
/// ```no_run
/// # // NOTE: The `nu!` macro needs the `nu` binary to exist. The tests are
/// # // therefore only compiled but not run (that's what the `no_run` at
/// # // the beginning of this code block is for).
/// #
/// use nu_test_support::nu;
///
/// let outcome = nu!(
///     "date now | date to-record | get year"
/// );
///
/// let dir = "/";
/// let outcome = nu!(
///     "ls {} | get name",
///     dir,
/// );
///
/// let outcome = nu!(
///     cwd: "/",
///     "ls | get name",
/// );
///
/// let cell = "size";
/// let outcome = nu!(
///     locale: "de_DE.UTF-8",
///     "ls | into int {}",
///     cell,
/// );
///
/// let decimals = 2;
/// let outcome = nu!(
///     locale: "de_DE.UTF-8",
///     "10 | into string --decimals {}",
///     decimals,
/// );
/// ```
#[macro_export]
macro_rules! nu {
    // In the `@options` phase, we restructure all the
    // `$field_1: $value_1, $field_2: $value_2, ...`
    // pairs to a structure like
    // `@options[ $field_1 => $value_1 ; $field_2 => $value_2 ; ... ]`.
    // We do this to later distinguish the options from the `$path` and `$part`s.
    // (See
    // https://users.rust-lang.org/t/i-dont-think-this-local-ambiguity-when-calling-macro-is-ambiguous/79401?u=x3ro
    // )
    //
    // If there is any special treatment needed for the `$value`, we can just
    // match for the specific `field` name.
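    //
    // As a rough sketch (not the literal expansion), a call like
    // `nu!(cwd: "/", "ls {}", dir)` first hits the entrypoint rule at the
    // bottom, which restarts the macro as
    // `nu!(@options [ ] cwd: "/", "ls {}", dir)`; after the `cwd` rule below
    // has consumed its pair, this becomes
    // `nu!(@options [ cwd => $crate::fs::in_directory("/") ; ] "ls {}", dir)`.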
    (
        @options [ $($options:tt)* ]
        cwd: $value:expr,
        $($rest:tt)*
    ) => {
        nu!(@options [ $($options)* cwd => $crate::fs::in_directory($value) ; ] $($rest)*)
    };
    // For all other options, we call `.into()` on the `$value` and hope for the best. ;)
    (
        @options [ $($options:tt)* ]
        $field:ident : $value:expr,
        $($rest:tt)*
    ) => {
        nu!(@options [ $($options)* $field => $value.into() ; ] $($rest)*)
    };

    // When the `$field: $value,` pairs are all parsed, the next tokens are the `$path` and any
    // number of `$part`s, potentially followed by a trailing comma.
    (
        @options [ $($options:tt)* ]
        $path:expr
        $(, $part:expr)*
        $(,)*
    ) => {{
        // Here we parse the options into a `NuOpts` struct
        let opts = nu!(@nu_opts $($options)*);
        // and format the `$path` using the `$part`s
        let path = nu!(@format_path $path, $($part),*);
        // Then finally we go to the `@main` phase, where the actual work is done.
        nu!(@main opts, path)
    }};

    // Create the NuOpts struct from the `field => value ;` pairs
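    // For example (a sketch), a single `cwd => some_dir ;` pair becomes
    // `NuOpts { cwd: Some(some_dir), ..Default::default() }`.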
    (@nu_opts $( $field:ident => $value:expr ; )*) => {
        NuOpts {
            $(
                $field: Some($value),
            )*
            ..Default::default()
        }
    };

    // Helper to format `$path`.
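    // For example (illustrative only), `@format_path "ls {}", dir` expands to
    // `format!("ls {}", dir)`, while a path without parts is passed through
    // unchanged.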
    (@format_path $path:expr $(,)?) => {
        // When there are no `$part`s, do not format anything
        $path
    };
    (@format_path $path:expr, $($part:expr),* $(,)?) => {{
        format!($path, $( $part ),*)
    }};

    // Do the actual work.
    (@main $opts:expr, $path:expr) => {{
        pub use std::error::Error;
        pub use std::io::prelude::*;
        pub use std::process::{Command, Stdio};
        pub use $crate::NATIVE_PATH_ENV_VAR;

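        // Wrap the command in double quotes for `-c`, escaping any embedded
        // `"` or `\`; e.g. `ls "a b"` becomes `"ls \"a b\""`.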
        pub fn escape_quote_string(input: String) -> String {
            let mut output = String::with_capacity(input.len() + 2);
            output.push('"');

            for c in input.chars() {
                if c == '"' || c == '\\' {
                    output.push('\\');
                }
                output.push(c);
            }

            output.push('"');
            output
        }

        let test_bins = $crate::fs::binaries();

        let cwd = std::env::current_dir().expect("Could not get current working directory.");
        let test_bins = $crate::nu_path::canonicalize_with(&test_bins, cwd).unwrap_or_else(|e| {
            panic!(
                "Couldn't canonicalize dummy binaries path {}: {:?}",
                test_bins.display(),
                e
            )
        });

        let mut paths = $crate::shell_os_paths();
        paths.insert(0, test_bins);

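        // A multi-line command is flattened into one line, with the individual
        // lines joined by `; `, so it can be passed as a single `-c` argument.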
        let path = $path.lines().collect::<Vec<_>>().join("; ");

        let paths_joined = match std::env::join_paths(paths) {
            Ok(all) => all,
            Err(_) => panic!("Couldn't join paths for PATH var."),
        };

        let target_cwd = $opts.cwd.unwrap_or(".".to_string());
        let locale = $opts.locale.unwrap_or("en_US.UTF-8".to_string());

        let mut command = Command::new($crate::fs::executable_path());
        command
            .env("PWD", &target_cwd)
            .env(nu_utils::locale::LOCALE_OVERRIDE_ENV_VAR, locale)
            .current_dir(target_cwd)
            .env(NATIVE_PATH_ENV_VAR, paths_joined)
            // .arg("--skip-plugins")
            // .arg("--no-history")
            // .arg("--config-file")
            // .arg($crate::fs::DisplayPath::display_path(&$crate::fs::fixtures().join("playground/config/default.toml")))
            .arg(format!("-c {}", escape_quote_string(path)))
            .stdout(Stdio::piped())
            // .stdin(Stdio::piped())
            .stderr(Stdio::piped());

        let mut process = match command.spawn() {
            Ok(child) => child,
            Err(why) => panic!("Can't run test {:?} {}", $crate::fs::executable_path(), why.to_string()),
        };

        // let stdin = process.stdin.as_mut().expect("couldn't open stdin");
        // stdin
        //     .write_all(b"exit\n")
        //     .expect("couldn't write to stdin");

        let output = process
            .wait_with_output()
            .expect("couldn't read from stdout/stderr");

        let out = $crate::macros::read_std(&output.stdout);
        let err = String::from_utf8_lossy(&output.stderr);

        println!("=== stderr\n{}", err);

        $crate::Outcome::new(out, err.into_owned())
    }};

    // This is the entrypoint for this macro.
    ($($token:tt)*) => {{
        #[derive(Default)]
        struct NuOpts {
            cwd: Option<String>,
            locale: Option<String>,
        }

        nu!(@options [ ] $($token)*)
    }};
}
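
/// Append `.exe` to a binary name on Windows and leave it unchanged elsewhere.
///
/// A minimal usage sketch (the binary name is illustrative):
/// `with_exe!("nu_plugin_example")` yields `"nu_plugin_example.exe"` on
/// Windows and `"nu_plugin_example"` on other platforms.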
#[macro_export]
macro_rules! with_exe {
    ($name:literal) => {{
        #[cfg(windows)]
        {
            concat!($name, ".exe")
        }
        #[cfg(not(windows))]
        {
            $name
        }
    }};
}
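
/// Run a command in nu, registering one or more test plugins first, and get
/// its output.
///
/// A minimal usage sketch; the plugin and command names are illustrative and
/// must match one of the dummy plugin binaries built for the test suite:
///
/// ```ignore
/// let outcome = nu_with_plugins!(
///     cwd: "tests",
///     plugin: ("nu_plugin_example"),
///     "example-command"
/// );
/// ```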
#[macro_export]
macro_rules! nu_with_plugins {
    (cwd: $cwd:expr, plugins: [$(($plugin_name:expr)),+$(,)?], $command:expr) => {{
        nu_with_plugins!($cwd, [$(("", $plugin_name)),+], $command)
    }};
    (cwd: $cwd:expr, plugin: ($plugin_name:expr), $command:expr) => {{
        nu_with_plugins!($cwd, [("", $plugin_name)], $command)
    }};

    ($cwd:expr, [$(($format:expr, $plugin_name:expr)),+$(,)?], $command:expr) => {{
        pub use std::error::Error;
        pub use std::io::prelude::*;
        pub use std::process::{Command, Stdio};
        pub use tempfile::tempdir;
        pub use $crate::{NATIVE_PATH_ENV_VAR, with_exe};

        let test_bins = $crate::fs::binaries();
        let test_bins = nu_path::canonicalize_with(&test_bins, ".").unwrap_or_else(|e| {
            panic!(
                "Couldn't canonicalize dummy binaries path {}: {:?}",
                test_bins.display(),
                e
            )
        });

        let temp = tempdir().expect("couldn't create a temporary directory");
        let temp_plugin_file = temp.path().join("plugin.nu");
        std::fs::File::create(&temp_plugin_file).expect("couldn't create temporary plugin file");

        $($crate::commands::ensure_binary_present($plugin_name);)+

        // TODO: `$format` is a dummy empty string; it is only kept so that
        // `$plugin_name` can be repeated here. Find a way to remove it.
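        // Build the plugin registration preamble: one `register <path>;`
        // statement per plugin, which is prepended to `$command` below.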
        let registrations = format!(
            concat!($(concat!("register ", $format, " {};")),+),
            $(
                nu_path::canonicalize_with(with_exe!($plugin_name), &test_bins)
                    .unwrap_or_else(|e| {
                        panic!("failed to canonicalize plugin {} path: {:?}", $plugin_name, e)
                    })
                    .display()
            ),+
        );
        let commands = format!("{registrations}{}", $command);

        let target_cwd = $crate::fs::in_directory(&$cwd);

        let mut process = match Command::new($crate::fs::executable_path())
            .current_dir(&target_cwd)
            .env("PWD", &target_cwd) // setting PWD is enough to set cwd
            .arg("--commands")
            .arg(commands)
            .arg("--plugin-config")
            .arg(temp_plugin_file)
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
        {
            Ok(child) => child,
            Err(why) => panic!("Can't run test {}", why.to_string()),
        };

        let output = process
            .wait_with_output()
            .expect("couldn't read from stdout/stderr");

        let out = $crate::macros::read_std(&output.stdout);
        let err = String::from_utf8_lossy(&output.stderr);

        println!("=== stderr\n{}", err);

        $crate::Outcome::new(out, err.into_owned())
    }};
}
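
/// Collapse captured output into a single line by stripping carriage returns
/// and newlines; for example, `read_std(b"a\r\nb\n")` returns `"ab"`.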
pub fn read_std(std: &[u8]) -> String {
    let out = String::from_utf8_lossy(std);
    let out = out.lines().collect::<Vec<_>>().join("\n");
    let out = out.replace("\r\n", "");
    out.replace('\n', "")
}