// nushell/crates/nu-parser/src/lex.rs — the lexer for nushell's parser.
use crate::ParseError;
use nu_protocol::Span;
/// The category of a token produced by `lex` / `lex_item`.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum TokenContents {
    /// A baseline item: command name, argument, bareword, quoted string, etc.
    Item,
    /// A `#` comment, spanning up to (not including) the next newline.
    Comment,
    /// The pipe separator `|`.
    Pipe,
    /// The statement separator `;`.
    Semicolon,
    /// The stdout redirection keyword `out>`.
    OutGreaterThan,
    /// The stderr redirection keyword `err>`.
    ErrGreaterThan,
    /// The combined redirection keyword `out+err>` (or `err+out>`).
    OutErrGreaterThan,
    /// An end-of-line (`\n`) token.
    Eol,
}
/// A single lexed token: its category plus the span of source it covers.
#[derive(Debug, PartialEq, Eq)]
pub struct Token {
    pub contents: TokenContents,
    pub span: Span,
}
impl Token {
pub fn new(contents: TokenContents, span: Span) -> Token {
Token { contents, span }
}
}
/// The kind of paired delimiter the lexer is currently nested inside of.
#[derive(Clone, Copy, Debug)]
pub enum BlockKind {
    Paren,
    CurlyBracket,
    SquareBracket,
}
impl BlockKind {
fn closing(self) -> u8 {
match self {
BlockKind::Paren => b')',
BlockKind::SquareBracket => b']',
BlockKind::CurlyBracket => b'}',
}
}
}
// A baseline token is terminated if it's not nested inside of a paired
// delimiter and the next character is one of: `|`, `;`, `#` or any
// whitespace.
2021-07-17 00:11:15 +02:00
fn is_item_terminator(
block_level: &[BlockKind],
c: u8,
additional_whitespace: &[u8],
special_tokens: &[u8],
) -> bool {
2021-06-30 03:42:56 +02:00
block_level.is_empty()
2021-07-06 00:58:56 +02:00
&& (c == b' '
|| c == b'\t'
|| c == b'\n'
|| c == b'\r'
2021-07-06 00:58:56 +02:00
|| c == b'|'
|| c == b';'
|| c == b'#'
2021-07-17 00:11:15 +02:00
|| additional_whitespace.contains(&c)
|| special_tokens.contains(&c))
2021-07-16 23:55:12 +02:00
}
// A special token is one that is a byte that stands alone as its own token. For example
// when parsing a signature you may want to have `:` be able to separate tokens and also
// to be handled as its own token to notify you you're about to parse a type in the example
// `foo:bar`
2021-07-17 00:11:15 +02:00
fn is_special_item(block_level: &[BlockKind], c: u8, special_tokens: &[u8]) -> bool {
block_level.is_empty() && special_tokens.contains(&c)
2021-06-30 03:42:56 +02:00
}
/// Lex a single baseline token starting at `*curr_offset` in `input`,
/// advancing `*curr_offset` past the end of the token.
///
/// `span_offset` is added to all reported spans, so `input` may be a
/// sub-slice of a larger buffer. `additional_whitespace` and `special_tokens`
/// extend the set of bytes that terminate an item (see `is_item_terminator`
/// and `is_special_item`).
///
/// Returns the token together with an optional `ParseError` for an unclosed
/// quote or delimiter; the partial token is still returned so that consumers
/// of a partial parse (e.g. completions) can use it.
pub fn lex_item(
    input: &[u8],
    curr_offset: &mut usize,
    span_offset: usize,
    additional_whitespace: &[u8],
    special_tokens: &[u8],
) -> (Token, Option<ParseError>) {
    // This variable tracks the starting character of a string literal, so that
    // we remain inside the string literal lexer mode until we encounter the
    // closing quote.
    let mut quote_start: Option<u8> = None;

    // Whether we are currently inside a `#` comment (reset at each newline).
    let mut in_comment = false;

    let token_start = *curr_offset;

    // This Vec tracks paired delimiters
    let mut block_level: Vec<BlockKind> = vec![];

    // The process of slurping up a baseline token repeats:
    //
    // - String literal, which begins with `'`, `"`, or a backtick, and
    //   continues until the same character is encountered again.
    // - Delimiter pair, which begins with `[`, `(`, or `{`, and continues until
    //   the matching closing delimiter is found, skipping comments and string
    //   literals.
    // - When not nested inside of a delimiter pair, when a terminating
    //   character (whitespace, `|`, `;` or `#`) is encountered, the baseline
    //   token is done.
    // - Otherwise, accumulate the character into the current baseline token.
    while let Some(c) = input.get(*curr_offset) {
        let c = *c;

        if let Some(start) = quote_start {
            // Check if we're in an escape sequence (escapes only apply inside
            // double-quoted strings).
            if c == b'\\' && start == b'"' {
                // Go ahead and consume the escape character if possible
                if input.get(*curr_offset + 1).is_some() {
                    // Successfully escaped the character
                    *curr_offset += 2;
                    continue;
                } else {
                    // Trailing backslash at end of input: report the quote
                    // that was never closed, pointing just past the token.
                    let span = Span::new(span_offset + token_start, span_offset + *curr_offset);

                    return (
                        Token {
                            contents: TokenContents::Item,
                            span,
                        },
                        Some(ParseError::UnexpectedEof(
                            (start as char).to_string(),
                            Span::new(span.end, span.end),
                        )),
                    );
                }
            }
            // If we encountered the closing quote character for the current
            // string, we're done with the current string.
            if c == start {
                // Also need to check to make sure we aren't escaped
                quote_start = None;
            }
        } else if c == b'#' {
            if is_item_terminator(&block_level, c, additional_whitespace, special_tokens) {
                break;
            }
            in_comment = true;
        } else if c == b'\n' || c == b'\r' {
            // A newline ends any comment mode before the terminator check.
            in_comment = false;
            if is_item_terminator(&block_level, c, additional_whitespace, special_tokens) {
                break;
            }
        } else if in_comment {
            if is_item_terminator(&block_level, c, additional_whitespace, special_tokens) {
                break;
            }
        } else if is_special_item(&block_level, c, special_tokens) && token_start == *curr_offset {
            // A special byte at the very start of a token stands alone:
            // consume exactly one byte and stop.
            *curr_offset += 1;
            break;
        } else if c == b'\'' || c == b'"' || c == b'`' {
            // We encountered the opening quote of a string literal.
            quote_start = Some(c);
        } else if c == b'[' {
            // We encountered an opening `[` delimiter.
            block_level.push(BlockKind::SquareBracket);
        } else if c == b']' {
            // We encountered a closing `]` delimiter. Pop off the opening `[`
            // delimiter.
            if let Some(BlockKind::SquareBracket) = block_level.last() {
                let _ = block_level.pop();
            }
        } else if c == b'{' {
            // We encountered an opening `{` delimiter.
            block_level.push(BlockKind::CurlyBracket);
        } else if c == b'}' {
            // We encountered a closing `}` delimiter. Pop off the opening `{`.
            if let Some(BlockKind::CurlyBracket) = block_level.last() {
                let _ = block_level.pop();
            }
        } else if c == b'(' {
            // We encountered an opening `(` delimiter.
            block_level.push(BlockKind::Paren);
        } else if c == b')' {
            // We encountered a closing `)` delimiter. Pop off the opening `(`.
            if let Some(BlockKind::Paren) = block_level.last() {
                let _ = block_level.pop();
            }
        } else if is_item_terminator(&block_level, c, additional_whitespace, special_tokens) {
            break;
        }

        *curr_offset += 1;
    }

    let span = Span::new(span_offset + token_start, span_offset + *curr_offset);

    // If there is still unclosed opening delimiters, remember they were missing
    if let Some(block) = block_level.last() {
        let delim = block.closing();
        let cause =
            ParseError::UnexpectedEof((delim as char).to_string(), Span::new(span.end, span.end));

        return (
            Token {
                contents: TokenContents::Item,
                span,
            },
            Some(cause),
        );
    }

    if let Some(delim) = quote_start {
        // The non-lite parse trims quotes on both sides, so we add the expected quote so that
        // anyone wanting to consume this partial parse (e.g., completions) will be able to get
        // correct information from the non-lite parse.
        return (
            Token {
                contents: TokenContents::Item,
                span,
            },
            Some(ParseError::UnexpectedEof(
                (delim as char).to_string(),
                Span::new(span.end, span.end),
            )),
        );
    }

    // If we didn't accumulate any characters, it's an unexpected error.
    if *curr_offset - token_start == 0 {
        return (
            Token {
                contents: TokenContents::Item,
                span,
            },
            Some(ParseError::UnexpectedEof("command".to_string(), span)),
        );
    }

    // Classify redirection keywords; everything else is a plain item.
    match &input[(span.start - span_offset)..(span.end - span_offset)] {
        b"out>" => (
            Token {
                contents: TokenContents::OutGreaterThan,
                span,
            },
            None,
        ),
        b"err>" => (
            Token {
                contents: TokenContents::ErrGreaterThan,
                span,
            },
            None,
        ),
        b"out+err>" | b"err+out>" => (
            Token {
                contents: TokenContents::OutErrGreaterThan,
                span,
            },
            None,
        ),
        _ => (
            Token {
                contents: TokenContents::Item,
                span,
            },
            None,
        ),
    }
}
pub fn lex(
input: &[u8],
span_offset: usize,
2021-07-17 00:11:15 +02:00
additional_whitespace: &[u8],
special_tokens: &[u8],
2021-11-21 19:13:09 +01:00
skip_comment: bool,
2021-06-30 03:42:56 +02:00
) -> (Vec<Token>, Option<ParseError>) {
let mut error = None;
let mut curr_offset = 0;
2021-06-30 03:42:56 +02:00
let mut output = vec![];
let mut is_complete = true;
while let Some(c) = input.get(curr_offset) {
let c = *c;
if c == b'|' {
// If the next character is `|`, it's either `|` or `||`.
let idx = curr_offset;
let prev_idx = idx;
curr_offset += 1;
// If the next character is `|`, we're looking at a `||`.
if let Some(c) = input.get(curr_offset) {
if *c == b'|' {
let idx = curr_offset;
curr_offset += 1;
output.push(Token::new(
TokenContents::Item,
Span::new(span_offset + prev_idx, span_offset + idx + 1),
2021-06-30 03:42:56 +02:00
));
continue;
}
}
// Otherwise, it's just a regular `|` token.
// Before we push, check to see if the previous character was a newline.
// If so, then this is a continuation of the previous line
if let Some(prev) = output.last_mut() {
match prev.contents {
TokenContents::Eol => {
*prev = Token::new(
TokenContents::Pipe,
Span::new(span_offset + idx, span_offset + idx + 1),
)
}
_ => {
output.push(Token::new(
TokenContents::Pipe,
Span::new(span_offset + idx, span_offset + idx + 1),
));
}
}
} else {
output.push(Token::new(
TokenContents::Pipe,
Span::new(span_offset + idx, span_offset + idx + 1),
));
}
2021-06-30 03:42:56 +02:00
is_complete = false;
} else if c == b';' {
// If the next character is a `;`, we're looking at a semicolon token.
if !is_complete && error.is_none() {
error = Some(ParseError::ExtraTokens(Span::new(
curr_offset,
curr_offset + 1,
)));
}
let idx = curr_offset;
curr_offset += 1;
output.push(Token::new(
TokenContents::Semicolon,
Span::new(span_offset + idx, span_offset + idx + 1),
2021-06-30 03:42:56 +02:00
));
} else if c == b'\r' {
// Ignore a stand-alone carriage return
curr_offset += 1;
} else if c == b'\n' {
2021-06-30 03:42:56 +02:00
// If the next character is a newline, we're looking at an EOL (end of line) token.
let idx = curr_offset;
curr_offset += 1;
2021-07-17 00:11:15 +02:00
if !additional_whitespace.contains(&c) {
output.push(Token::new(
TokenContents::Eol,
Span::new(span_offset + idx, span_offset + idx + 1),
));
2021-06-30 03:42:56 +02:00
}
} else if c == b'#' {
// If the next character is `#`, we're at the beginning of a line
// comment. The comment continues until the next newline.
let mut start = curr_offset;
while let Some(input) = input.get(curr_offset) {
if *input == b'\n' {
2021-11-21 19:13:09 +01:00
if !skip_comment {
output.push(Token::new(
TokenContents::Comment,
2022-01-03 22:37:45 +01:00
Span::new(span_offset + start, span_offset + curr_offset),
2021-11-21 19:13:09 +01:00
));
}
2021-06-30 03:42:56 +02:00
start = curr_offset;
break;
} else {
curr_offset += 1;
2021-06-30 03:42:56 +02:00
}
}
2021-11-21 19:13:09 +01:00
if start != curr_offset && !skip_comment {
2021-06-30 03:42:56 +02:00
output.push(Token::new(
TokenContents::Comment,
Span::new(span_offset + start, span_offset + curr_offset),
2021-06-30 03:42:56 +02:00
));
}
2021-07-17 00:11:15 +02:00
} else if c == b' ' || c == b'\t' || additional_whitespace.contains(&c) {
2021-06-30 03:42:56 +02:00
// If the next character is non-newline whitespace, skip it.
curr_offset += 1;
} else {
// Otherwise, try to consume an unclassified token.
let (token, err) = lex_item(
2021-07-17 00:11:15 +02:00
input,
&mut curr_offset,
span_offset,
2021-07-17 00:11:15 +02:00
additional_whitespace,
special_tokens,
);
2021-06-30 03:42:56 +02:00
if error.is_none() {
error = err;
}
is_complete = true;
output.push(token);
2021-06-30 03:42:56 +02:00
}
}
(output, error)
}