2021-01-29 14:43:35 +01:00
use crate ::pretty ::{ DbgDocBldr , DebugDocBuilder , PrettyDebugWithSource } ;
2019-11-21 15:33:14 +01:00
use crate ::text ::Text ;
2019-06-11 07:53:04 +02:00
use derive_new ::new ;
2019-06-22 03:36:57 +02:00
use getset ::Getters ;
2019-08-02 21:15:07 +02:00
use serde ::Deserialize ;
2019-07-13 04:18:02 +02:00
use serde ::Serialize ;
2020-03-01 01:20:42 +01:00
use std ::cmp ::Ordering ;
2019-09-14 18:30:24 +02:00
use std ::path ::{ Path , PathBuf } ;
2019-06-11 07:53:04 +02:00
2020-01-17 21:35:48 +01:00
/// Anchors represent a location that a value originated from. The value may have been loaded from a file, fetched from a website, or parsed from some text
2019-11-21 15:33:14 +01:00
#[ derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash) ]
pub enum AnchorLocation {
2020-01-17 21:35:48 +01:00
/// The originating site where the value was first found
2019-11-21 15:33:14 +01:00
Url ( String ) ,
2020-01-17 21:35:48 +01:00
/// The original file where the value was loaded from
2019-11-21 15:33:14 +01:00
File ( String ) ,
2020-01-17 21:35:48 +01:00
/// The text where the value was parsed from
2019-11-21 15:33:14 +01:00
Source ( Text ) ,
}
pub trait HasTag {
2020-01-18 20:42:36 +01:00
/// Get the associated metadata
2019-11-21 15:33:14 +01:00
fn tag ( & self ) -> Tag ;
}
2020-01-18 20:42:36 +01:00
/// A wrapper type that attaches a Span to a value
2019-08-05 10:54:29 +02:00
#[ derive(new, Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash) ]
2019-10-13 06:12:43 +02:00
pub struct Spanned < T > {
pub span : Span ,
pub item : T ,
}
impl < T > Spanned < T > {
2020-01-18 20:42:36 +01:00
/// Allows mapping over a Spanned value
2019-10-13 06:12:43 +02:00
pub fn map < U > ( self , input : impl FnOnce ( T ) -> U ) -> Spanned < U > {
let span = self . span ;
let mapped = input ( self . item ) ;
mapped . spanned ( span )
}
}
2019-11-04 16:47:03 +01:00
impl Spanned < String > {
2020-01-18 20:42:36 +01:00
/// Iterates over the contained String
2019-11-04 16:47:03 +01:00
pub fn items < ' a , U > (
items : impl Iterator < Item = & ' a Spanned < String > > ,
) -> impl Iterator < Item = & ' a str > {
2019-12-06 16:28:26 +01:00
items . map ( | item | & item . item [ .. ] )
2019-11-04 16:47:03 +01:00
}
2020-01-18 20:42:36 +01:00
/// Borrows the contained String
2019-11-04 16:47:03 +01:00
pub fn borrow_spanned ( & self ) -> Spanned < & str > {
let span = self . span ;
self . item [ .. ] . spanned ( span )
}
2021-01-22 19:13:29 +01:00
pub fn slice_spanned ( & self , span : impl Into < Span > ) -> Spanned < & str > {
let span = span . into ( ) ;
let item = & self . item [ span . start ( ) .. span . end ( ) ] ;
item . spanned ( span )
}
2019-11-04 16:47:03 +01:00
}
2019-10-13 06:12:43 +02:00
pub trait SpannedItem : Sized {
2020-01-18 20:42:36 +01:00
/// Converts a value into a Spanned value
2019-10-13 06:12:43 +02:00
fn spanned ( self , span : impl Into < Span > ) -> Spanned < Self > {
Spanned {
item : self ,
span : span . into ( ) ,
}
}
2020-01-18 20:42:36 +01:00
/// Converts a value into a Spanned value, using an unknown Span
2019-10-13 06:12:43 +02:00
fn spanned_unknown ( self ) -> Spanned < Self > {
Spanned {
item : self ,
span : Span ::unknown ( ) ,
}
}
}
/// Blanket implementation: every sized type gains the `spanned` helpers.
impl < T > SpannedItem for T { }
impl < T > std ::ops ::Deref for Spanned < T > {
type Target = T ;
2020-01-18 20:42:36 +01:00
/// Shorthand to deref to the contained value
2019-10-13 06:12:43 +02:00
fn deref ( & self ) -> & T {
& self . item
}
}
2019-11-21 15:33:14 +01:00
2020-01-18 20:42:36 +01:00
/// A wrapper type that attaches a Tag to a value
2019-10-13 06:12:43 +02:00
#[ derive(new, Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, Hash) ]
2019-08-01 05:25:59 +02:00
pub struct Tagged < T > {
pub tag : Tag ,
2019-06-27 06:56:48 +02:00
pub item : T ,
2019-06-11 07:53:04 +02:00
}
2019-11-04 16:47:03 +01:00
impl Tagged < String > {
2020-01-18 20:42:36 +01:00
/// Allows borrowing the contained string slice as a spanned value
2019-11-04 16:47:03 +01:00
pub fn borrow_spanned ( & self ) -> Spanned < & str > {
let span = self . tag . span ;
self . item [ .. ] . spanned ( span )
}
2020-01-18 20:42:36 +01:00
/// Allows borrowing the contained string slice as a tagged value
2019-11-04 16:47:03 +01:00
pub fn borrow_tagged ( & self ) -> Tagged < & str > {
self . item [ .. ] . tagged ( self . tag . clone ( ) )
}
}
2019-11-21 15:33:14 +01:00
impl < T > Tagged < Vec < T > > {
2020-01-18 20:42:36 +01:00
/// Iterates over the contained value(s)
2019-11-21 15:33:14 +01:00
pub fn items ( & self ) -> impl Iterator < Item = & T > {
self . item . iter ( )
}
}
2019-09-14 18:30:24 +02:00
impl < T > HasTag for Tagged < T > {
2020-01-18 20:42:36 +01:00
/// Helper for getting the Tag from the Tagged value
2019-09-14 18:30:24 +02:00
fn tag ( & self ) -> Tag {
2019-10-13 06:12:43 +02:00
self . tag . clone ( )
2019-09-14 18:30:24 +02:00
}
}
impl AsRef < Path > for Tagged < PathBuf > {
2020-01-18 20:42:36 +01:00
/// Gets the reference to the contained Path
2019-09-14 18:30:24 +02:00
fn as_ref ( & self ) -> & Path {
self . item . as_ref ( )
2019-07-24 00:22:11 +02:00
}
}
2019-08-01 05:25:59 +02:00
pub trait TaggedItem : Sized {
2019-08-05 10:54:29 +02:00
fn tagged ( self , tag : impl Into < Tag > ) -> Tagged < Self > {
2019-10-13 06:12:43 +02:00
Tagged {
item : self ,
tag : tag . into ( ) ,
}
2019-07-09 06:31:26 +02:00
}
2019-07-08 18:44:53 +02:00
// For now, this is a temporary facility. In many cases, there are other useful spans that we
// could be using, such as the original source spans of JSON or Toml files, but we don't yet
// have the infrastructure to make that work.
2019-08-01 05:25:59 +02:00
fn tagged_unknown ( self ) -> Tagged < Self > {
2019-10-13 06:12:43 +02:00
Tagged {
item : self ,
tag : Tag {
2019-08-05 10:54:29 +02:00
span : Span ::unknown ( ) ,
2019-10-13 06:12:43 +02:00
anchor : None ,
2019-08-05 10:54:29 +02:00
} ,
2019-10-13 06:12:43 +02:00
}
2019-07-08 18:44:53 +02:00
}
2019-06-29 10:55:42 +02:00
}
2019-08-01 05:25:59 +02:00
/// Blanket implementation: every sized type gains the `tagged` helpers.
impl < T > TaggedItem for T { }
2019-06-29 10:55:42 +02:00
2019-08-01 05:25:59 +02:00
impl < T > std ::ops ::Deref for Tagged < T > {
2019-06-11 07:53:04 +02:00
type Target = T ;
fn deref ( & self ) -> & T {
& self . item
}
}
2019-08-01 05:25:59 +02:00
impl < T > Tagged < T > {
pub fn map < U > ( self , input : impl FnOnce ( T ) -> U ) -> Tagged < U > {
2019-08-05 10:54:29 +02:00
let tag = self . tag ( ) ;
2019-06-11 07:53:04 +02:00
2019-08-01 05:25:59 +02:00
let mapped = input ( self . item ) ;
2019-10-13 06:12:43 +02:00
mapped . tagged ( tag )
2019-08-01 05:25:59 +02:00
}
2019-06-11 07:53:04 +02:00
2019-11-03 09:49:06 +01:00
pub fn map_anchored ( self , anchor : & Option < AnchorLocation > ) -> Tagged < T > {
let mut tag = self . tag ;
tag . anchor = anchor . clone ( ) ;
Tagged {
item : self . item ,
2019-12-06 16:28:26 +01:00
tag ,
2019-11-03 09:49:06 +01:00
}
}
2019-11-04 16:47:03 +01:00
pub fn transpose ( & self ) -> Tagged < & T > {
Tagged {
item : & self . item ,
tag : self . tag . clone ( ) ,
}
}
2020-02-10 03:08:14 +01:00
/// Creates a new `Tag` from the current `Tag`
2019-08-05 10:54:29 +02:00
pub fn tag ( & self ) -> Tag {
2019-10-13 06:12:43 +02:00
self . tag . clone ( )
2019-08-05 10:54:29 +02:00
}
2020-02-10 03:08:14 +01:00
/// Retrieve the `Span` for the current `Tag`.
2019-09-18 08:37:04 +02:00
pub fn span ( & self ) -> Span {
self . tag . span
}
2020-02-10 03:08:14 +01:00
/// Returns the `AnchorLocation` of the `Tag` if there is one.
2019-10-13 06:12:43 +02:00
pub fn anchor ( & self ) -> Option < AnchorLocation > {
self . tag . anchor . clone ( )
2019-08-05 10:54:29 +02:00
}
2020-02-10 03:08:14 +01:00
/// Returns the underlying `AnchorLocation` variant type as a string.
2019-10-13 06:12:43 +02:00
pub fn anchor_name ( & self ) -> Option < String > {
match self . tag . anchor {
Some ( AnchorLocation ::File ( ref file ) ) = > Some ( file . clone ( ) ) ,
Some ( AnchorLocation ::Url ( ref url ) ) = > Some ( url . clone ( ) ) ,
2019-08-10 22:18:14 +02:00
_ = > None ,
}
}
2020-02-10 03:08:14 +01:00
/// Returns a reference to the current `Tag`'s item.
2019-08-05 10:54:29 +02:00
pub fn item ( & self ) -> & T {
& self . item
2019-07-08 18:44:53 +02:00
}
2019-08-16 00:18:18 +02:00
2020-02-10 03:08:14 +01:00
/// Returns a tuple of the `Tagged` item and `Tag`.
2019-08-16 00:18:18 +02:00
pub fn into_parts ( self ) -> ( T , Tag ) {
( self . item , self . tag )
}
2019-07-08 18:44:53 +02:00
}
2019-09-14 18:30:24 +02:00
impl From<&Tag> for Tag {
    /// Clones a borrowed `Tag` into an owned one.
    fn from(input: &Tag) -> Tag {
        input.clone()
    }
}
2019-06-11 07:53:04 +02:00
impl From < ( usize , usize ) > for Span {
fn from ( input : ( usize , usize ) ) -> Span {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
Span ::new ( input . 0 , input . 1 )
2019-06-11 07:53:04 +02:00
}
}
impl From < & std ::ops ::Range < usize > > for Span {
fn from ( input : & std ::ops ::Range < usize > ) -> Span {
2019-12-04 22:14:52 +01:00
Span ::new ( input . start , input . end )
2019-08-01 05:25:59 +02:00
}
}
2020-01-17 21:35:48 +01:00
/// The set of metadata that can be associated with a value
2019-08-01 05:25:59 +02:00
#[ derive(
2020-02-10 21:32:10 +01:00
Debug ,
Default ,
Clone ,
PartialEq ,
Eq ,
Ord ,
PartialOrd ,
Serialize ,
Deserialize ,
Hash ,
Getters ,
new ,
2019-08-01 05:25:59 +02:00
) ]
pub struct Tag {
2020-01-17 21:35:48 +01:00
/// The original source for this value
2019-10-13 06:12:43 +02:00
pub anchor : Option < AnchorLocation > ,
2020-01-17 21:35:48 +01:00
/// The span in the source text for the command that created this value
2019-08-01 05:25:59 +02:00
pub span : Span ,
}
2019-08-16 00:18:18 +02:00
impl From < Span > for Tag {
fn from ( span : Span ) -> Self {
2019-10-13 06:12:43 +02:00
Tag { anchor : None , span }
2019-08-16 00:18:18 +02:00
}
}
impl From<&Span> for Tag {
    /// Copies a borrowed `Span` into a `Tag` that has no anchor.
    fn from(span: &Span) -> Self {
        Tag {
            anchor: None,
            span: *span,
        }
    }
}
2019-10-13 06:12:43 +02:00
impl From < ( usize , usize , AnchorLocation ) > for Tag {
fn from ( ( start , end , anchor ) : ( usize , usize , AnchorLocation ) ) -> Self {
2019-09-14 18:30:24 +02:00
Tag {
2019-10-13 06:12:43 +02:00
anchor : Some ( anchor ) ,
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
span : Span ::new ( start , end ) ,
2019-09-14 18:30:24 +02:00
}
}
}
2019-10-13 06:12:43 +02:00
impl From < ( usize , usize , Option < AnchorLocation > ) > for Tag {
fn from ( ( start , end , anchor ) : ( usize , usize , Option < AnchorLocation > ) ) -> Self {
2019-09-14 18:30:24 +02:00
Tag {
2019-10-13 06:12:43 +02:00
anchor ,
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
span : Span ::new ( start , end ) ,
2019-09-14 18:30:24 +02:00
}
}
}
2019-08-17 05:53:39 +02:00
impl From<Tag> for Span {
    /// Extracts the `Span` from an owned `Tag`.
    fn from(tag: Tag) -> Self {
        tag.span
    }
}
impl From<&Tag> for Span {
    /// Copies the `Span` out of a borrowed `Tag`.
    fn from(tag: &Tag) -> Self {
        tag.span
    }
}
2019-08-05 10:54:29 +02:00
impl Tag {
2020-02-10 21:32:10 +01:00
/// Creates a default `Tag` with an unknown `Span` position and no `AnchorLocation`.
///
/// NOTE(review): this inherent method shadows the derived `Default` impl for
/// `Tag::default()` call sites; the derived impl would use `Span::default()`,
/// which may differ from `Span::unknown()` — confirm before removing either.
pub fn default() -> Self {
    Tag {
        anchor: None,
        span: Span::unknown(),
    }
}
2020-10-03 16:06:02 +02:00
/// Returns a `Tag` with the same span but the given anchor.
pub fn anchored(self, anchor: Option<AnchorLocation>) -> Tag {
    Tag {
        span: self.span,
        anchor,
    }
}
2020-02-10 03:08:14 +01:00
/// Creates a `Tag` from the given `Span` with no `AnchorLocation`
2019-09-29 07:13:56 +02:00
pub fn unknown_anchor ( span : Span ) -> Tag {
2019-10-13 06:12:43 +02:00
Tag { anchor : None , span }
2019-08-05 10:54:29 +02:00
}
2020-02-10 03:08:14 +01:00
/// Creates a `Tag` from the given `AnchorLocation` for a span with a length of 1.
2019-10-13 06:12:43 +02:00
pub fn for_char ( pos : usize , anchor : AnchorLocation ) -> Tag {
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 22:22:50 +02:00
Tag {
2019-10-13 06:12:43 +02:00
anchor : Some ( anchor ) ,
2019-12-04 22:14:52 +01:00
span : Span ::new ( pos , pos + 1 ) ,
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 22:22:50 +02:00
}
}
2021-01-02 05:24:32 +01:00
/// Creates a `Tag` for the given `AnchorLocation` with unknown `Span` position.
2019-10-13 06:12:43 +02:00
pub fn unknown_span ( anchor : AnchorLocation ) -> Tag {
2019-09-14 18:30:24 +02:00
Tag {
2019-10-13 06:12:43 +02:00
anchor : Some ( anchor ) ,
2019-09-14 18:30:24 +02:00
span : Span ::unknown ( ) ,
}
}
2020-02-10 03:08:14 +01:00
/// Creates a `Tag` with no `AnchorLocation` and an unknown `Span` position.
2019-08-05 10:54:29 +02:00
pub fn unknown ( ) -> Tag {
Tag {
2019-10-13 06:12:43 +02:00
anchor : None ,
2019-08-05 10:54:29 +02:00
span : Span ::unknown ( ) ,
}
}
2019-09-14 18:30:24 +02:00
2020-02-10 03:08:14 +01:00
/// Returns the `AnchorLocation` of the current `Tag`
2019-11-21 15:33:14 +01:00
pub fn anchor ( & self ) -> Option < AnchorLocation > {
self . anchor . clone ( )
}
2020-02-10 03:08:14 +01:00
// Merges the current `Tag` with the given `Tag`.
///
/// Both Tags must share the same `AnchorLocation`.
// The resulting `Tag` will have a `Span` that starts from the current `Tag` and ends at `Span` of the given `Tag`.
2019-09-14 18:30:24 +02:00
pub fn until ( & self , other : impl Into < Tag > ) -> Tag {
let other = other . into ( ) ;
2019-09-14 19:16:52 +02:00
debug_assert! (
2019-09-29 07:13:56 +02:00
self . anchor = = other . anchor ,
" Can only merge two tags with the same anchor "
2019-09-14 19:16:52 +02:00
) ;
2019-09-14 18:30:24 +02:00
Tag {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
span : Span ::new ( self . span . start , other . span . end ) ,
2019-10-13 06:12:43 +02:00
anchor : self . anchor . clone ( ) ,
2019-09-14 18:30:24 +02:00
}
}
2020-02-10 03:08:14 +01:00
/// Merges the current `Tag` with the given optional `Tag`.
///
/// Both `Tag`s must share the same `AnchorLocation`.
/// The resulting `Tag` will have a `Span` that starts from the current `Tag` and ends at `Span` of the given `Tag`.
2021-01-02 05:24:32 +01:00
/// Should the `None` variant be passed in, a new `Tag` with the same `Span` and `AnchorLocation` will be returned.
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 22:22:50 +02:00
pub fn until_option ( & self , other : Option < impl Into < Tag > > ) -> Tag {
match other {
Some ( other ) = > {
let other = other . into ( ) ;
debug_assert! (
self . anchor = = other . anchor ,
" Can only merge two tags with the same anchor "
) ;
Tag {
span : Span ::new ( self . span . start , other . span . end ) ,
2019-10-13 06:12:43 +02:00
anchor : self . anchor . clone ( ) ,
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 22:22:50 +02:00
}
}
2019-10-13 06:12:43 +02:00
None = > self . clone ( ) ,
Overhaul the coloring system
This commit replaces the previous naive coloring system with a coloring
system that is more aligned with the parser.
The main benefit of this change is that it allows us to use parsing
rules to decide how to color tokens.
For example, consider the following syntax:
```
$ ps | where cpu > 10
```
Ideally, we could color `cpu` like a column name and not a string,
because `cpu > 10` is a shorthand block syntax that expands to
`{ $it.cpu > 10 }`.
The way that we know that it's a shorthand block is that the `where`
command declares that its first parameter is a `SyntaxShape::Block`,
which allows the shorthand block form.
In order to accomplish this, we need to color the tokens in a way that
corresponds to their expanded semantics, which means that high-fidelity
coloring requires expansion.
This commit adds a `ColorSyntax` trait that corresponds to the
`ExpandExpression` trait. The semantics are fairly similar, with a few
differences.
First `ExpandExpression` consumes N tokens and returns a single
`hir::Expression`. `ColorSyntax` consumes N tokens and writes M
`FlatShape` tokens to the output.
Concretely, for syntax like `[1 2 3]`
- `ExpandExpression` takes a single token node and produces a single
`hir::Expression`
- `ColorSyntax` takes the same token node and emits 7 `FlatShape`s
(open delimiter, int, whitespace, int, whitespace, int, close
delimiter)
Second, `ColorSyntax` is more willing to plow through failures than
`ExpandExpression`.
In particular, consider syntax like
```
$ ps | where cpu >
```
In this case
- `ExpandExpression` will see that the `where` command is expecting a
block, see that it's not a literal block and try to parse it as a
shorthand block. It will successfully find a member followed by an
infix operator, but not a following expression. That means that the
entire pipeline part fails to parse and is a syntax error.
- `ColorSyntax` will also try to parse it as a shorthand block and
ultimately fail, but it will fall back to "backoff coloring mode",
which parsing any unidentified tokens in an unfallible, simple way. In
this case, `cpu` will color as a string and `>` will color as an
operator.
Finally, it's very important that coloring a pipeline infallibly colors
the entire string, doesn't fail, and doesn't get stuck in an infinite
loop.
In order to accomplish this, this PR separates `ColorSyntax`, which is
infallible from `FallibleColorSyntax`, which might fail. This allows the
type system to let us know if our coloring rules bottom out at at an
infallible rule.
It's not perfect: it's still possible for the coloring process to get
stuck or consume tokens non-atomically. I intend to reduce the
opportunity for those problems in a future commit. In the meantime, the
current system catches a number of mistakes (like trying to use a
fallible coloring rule in a loop without thinking about the possibility
that it will never terminate).
2019-10-06 22:22:50 +02:00
}
}
2019-09-14 18:30:24 +02:00
pub fn slice < ' a > ( & self , source : & ' a str ) -> & ' a str {
self . span . slice ( source )
}
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
2021-01-01 03:13:59 +01:00
pub fn string ( & self , source : & str ) -> String {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
self . span . slice ( source ) . to_string ( )
}
pub fn tagged_slice < ' a > ( & self , source : & ' a str ) -> Tagged < & ' a str > {
self . span . slice ( source ) . tagged ( self )
}
2021-01-01 03:13:59 +01:00
pub fn tagged_string ( & self , source : & str ) -> Tagged < String > {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
self . span . slice ( source ) . to_string ( ) . tagged ( self )
}
2019-11-21 15:33:14 +01:00
pub fn anchor_name ( & self ) -> Option < String > {
match self . anchor {
Some ( AnchorLocation ::File ( ref file ) ) = > Some ( file . clone ( ) ) ,
Some ( AnchorLocation ::Url ( ref url ) ) = > Some ( url . clone ( ) ) ,
_ = > None ,
}
}
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
}
pub fn tag_for_tagged_list ( mut iter : impl Iterator < Item = Tag > ) -> Tag {
let first = iter . next ( ) ;
let first = match first {
None = > return Tag ::unknown ( ) ,
Some ( first ) = > first ,
} ;
let last = iter . last ( ) ;
2019-11-04 16:47:03 +01:00
match last {
None = > first ,
Some ( last ) = > first . until ( last ) ,
}
}
pub fn span_for_spanned_list ( mut iter : impl Iterator < Item = Span > ) -> Span {
let first = iter . next ( ) ;
let first = match first {
None = > return Span ::unknown ( ) ,
Some ( first ) = > first ,
} ;
let last = iter . last ( ) ;
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
match last {
None = > first ,
Some ( last ) = > first . until ( last ) ,
}
2019-08-05 10:54:29 +02:00
}
2020-02-10 03:08:14 +01:00
/// A `Span` is metadata which indicates the start and end positions.
///
/// `Span`s are combined with `AnchorLocation`s to form another type of metadata, a `Tag`.
/// A `Span`'s end position must be greater than or equal to its start position.
2020-02-10 21:32:10 +01:00
#[ derive(
Debug , Default , Clone , Copy , PartialEq , Eq , Ord , PartialOrd , Serialize , Deserialize , Hash ,
) ]
2019-08-01 05:25:59 +02:00
pub struct Span {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
start : usize ,
end : usize ,
2019-08-01 05:25:59 +02:00
}
2019-11-21 15:33:14 +01:00
impl From < & Span > for Span {
fn from ( span : & Span ) -> Span {
* span
}
}
2019-08-01 05:25:59 +02:00
impl From < Option < Span > > for Span {
fn from ( input : Option < Span > ) -> Span {
2020-08-03 19:43:27 +02:00
input . unwrap_or_else ( | | Span ::new ( 0 , 0 ) )
2019-06-11 07:53:04 +02:00
}
}
2020-05-18 20:44:27 +02:00
impl From<Span> for std::ops::Range<usize> {
    /// Converts the span into the equivalent half-open `start..end` range.
    fn from(input: Span) -> std::ops::Range<usize> {
        input.start..input.end
    }
}
2019-06-11 07:53:04 +02:00
impl Span {
2020-02-10 21:32:10 +01:00
    /// Creates a default new `Span` that has 0 start and 0 end.
    ///
    /// NOTE(review): this inherent method shadows the derived
    /// `Default::default` when called as `Span::default()`. Both return a span
    /// with 0 start and 0 end (`unknown()` is `Span::new(0, 0)`), so behavior
    /// is the same either way.
    pub fn default() -> Self {
        Span::unknown()
    }
2020-02-10 03:08:14 +01:00
/// Creates a new `Span` that has 0 start and 0 end.
2019-07-08 18:44:53 +02:00
pub fn unknown ( ) -> Span {
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
Span ::new ( 0 , 0 )
}
2021-02-04 08:20:21 +01:00
pub fn from_list ( list : & [ impl HasSpan ] ) -> Span {
let mut iterator = list . iter ( ) ;
match iterator . next ( ) {
None = > Span ::new ( 0 , 0 ) ,
Some ( first ) = > {
let last = iterator . last ( ) . unwrap_or ( first ) ;
Span ::new ( first . span ( ) . start , last . span ( ) . end )
}
}
}
2020-02-10 03:08:14 +01:00
/// Creates a new `Span` from start and end inputs. The end parameter must be greater than or equal to the start parameter.
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
pub fn new ( start : usize , end : usize ) -> Span {
assert! (
end > = start ,
" Can't create a Span whose end < start, start={}, end={} " ,
start ,
end
) ;
Span { start , end }
2019-07-19 21:48:14 +02:00
}
2020-02-10 03:08:14 +01:00
/// Creates a `Span` with a length of 1 from the given position.
///
/// # Example
///
/// ```
/// let char_span = Span::for_char(5);
///
/// assert_eq!(char_span.start(), 5);
/// assert_eq!(char_span.end(), 6);
/// ```
2019-10-13 06:12:43 +02:00
pub fn for_char ( pos : usize ) -> Span {
Span {
start : pos ,
end : pos + 1 ,
}
}
2020-02-10 03:08:14 +01:00
/// Returns a bool indicating if the given position falls inside the current `Span`.
///
/// # Example
///
/// ```
/// let span = Span::new(2, 8);
///
/// assert_eq!(span.contains(5), true);
2020-08-21 21:37:51 +02:00
/// assert_eq!(span.contains(8), false);
2020-02-10 03:08:14 +01:00
/// assert_eq!(span.contains(100), false);
/// ```
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
pub fn contains ( & self , pos : usize ) -> bool {
2020-08-21 21:37:51 +02:00
self . start < = pos & & pos < self . end
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
}
2020-02-10 03:08:14 +01:00
/// Returns a new Span by merging an earlier Span with the current Span.
///
/// The resulting Span will have the same start position as the given Span and same end as the current Span.
///
/// # Example
///
/// ```
/// let original_span = Span::new(4, 6);
/// let earlier_span = Span::new(1, 3);
/// let merged_span = origin_span.since(earlier_span);
///
/// assert_eq!(merged_span.start(), 1);
/// assert_eq!(merged_span.end(), 6);
/// ```
2020-01-10 16:44:24 +01:00
pub fn since ( & self , other : impl Into < Span > ) -> Span {
let other = other . into ( ) ;
Span ::new ( other . start , self . end )
}
2020-02-10 03:08:14 +01:00
/// Returns a new Span by merging a later Span with the current Span.
///
/// The resulting Span will have the same start position as the current Span and same end as the given Span.
///
/// # Example
///
/// ```
/// let original_span = Span::new(4, 6);
/// let later_span = Span::new(9, 11);
/// let merged_span = origin_span.until(later_span);
///
/// assert_eq!(merged_span.start(), 4);
/// assert_eq!(merged_span.end(), 11);
/// ```
2019-10-13 06:12:43 +02:00
pub fn until ( & self , other : impl Into < Span > ) -> Span {
let other = other . into ( ) ;
Span ::new ( self . start , other . end )
}
2020-02-10 03:08:14 +01:00
/// Returns a new Span by merging a later Span with the current Span.
///
/// If the given Span is of the None variant,
/// A Span with the same values as the current Span is returned.
2019-10-13 06:12:43 +02:00
pub fn until_option ( & self , other : Option < impl Into < Span > > ) -> Span {
match other {
Some ( other ) = > {
let other = other . into ( ) ;
Span ::new ( self . start , other . end )
}
None = > * self ,
}
}
2021-01-01 03:13:59 +01:00
pub fn string ( & self , source : & str ) -> String {
2019-10-13 06:12:43 +02:00
self . slice ( source ) . to_string ( )
}
pub fn spanned_slice < ' a > ( & self , source : & ' a str ) -> Spanned < & ' a str > {
self . slice ( source ) . spanned ( * self )
}
2021-01-01 03:13:59 +01:00
/// Returns an owned copy of the covered text, paired with this Span.
pub fn spanned_string(&self, source: &str) -> Spanned<String> {
    // `string` is slice + to_string; attach the span to the owned copy.
    self.string(source).spanned(*self)
}
2020-02-10 03:08:14 +01:00
/// Returns the start position of the current Span.
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
pub fn start ( & self ) -> usize {
self . start
}
2020-02-10 03:08:14 +01:00
/// Returns the end position of the current Span.
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
pub fn end ( & self ) -> usize {
self . end
}
2020-02-10 03:08:14 +01:00
/// Returns a bool if the current Span indicates an "unknown" position.
///
/// # Example
///
/// ```
/// let unknown_span = Span::unknown();
/// let known_span = Span::new(4, 6);
///
/// assert_eq!(unknown_span.is_unknown(), true);
/// assert_eq!(known_span.is_unknown(), false);
/// ```
2019-07-08 18:44:53 +02:00
pub fn is_unknown ( & self ) -> bool {
self . start = = 0 & & self . end = = 0
}
2020-03-16 21:50:45 +01:00
/// Returns a bool if the current Span does not cover.
///
/// # Example
///
/// ```
/// // make clean
/// // ----
/// // (0,4)
2020-08-03 19:43:27 +02:00
/// //
2020-03-16 21:50:45 +01:00
/// // ^(5,5)
2020-08-03 19:43:27 +02:00
///
2020-03-16 21:50:45 +01:00
/// let make_span = Span::new(0,4);
/// let clean_span = Span::new(5,5);
///
/// assert_eq!(make_span.is_closed(), false);
/// assert_eq!(clean_span.is_closed(), true);
/// ```
pub fn is_closed ( & self ) -> bool {
self . start = = self . end
}
2020-02-10 03:08:14 +01:00
/// Returns a slice of the input that covers the start and end of the current Span.
2019-08-29 14:16:11 +02:00
pub fn slice < ' a > ( & self , source : & ' a str ) -> & ' a str {
2019-06-22 03:36:57 +02:00
& source [ self . start .. self . end ]
2019-06-11 07:53:04 +02:00
}
}
2020-03-01 01:20:42 +01:00
/// Orders a Span against a raw length by comparing the span's own length.
impl PartialOrd<usize> for Span {
    fn partial_cmp(&self, other: &usize) -> Option<Ordering> {
        let len = self.end - self.start;
        len.partial_cmp(other)
    }
}
impl PartialEq < usize > for Span {
fn eq ( & self , other : & usize ) -> bool {
( self . end - self . start ) = = * other
}
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
/// Converts a value into a spanned form by attaching a Span to it.
pub trait IntoSpanned {
    /// The resulting type; it must be able to report a (fallible) span.
    type Output: HasFallibleSpan;

    /// Attaches `span` to `self`, producing `Self::Output`.
    fn into_spanned(self, span: impl Into<Span>) -> Self::Output;
}
/// Blanket impl: anything that already has a fallible span is "already
/// spanned", so the provided span is simply discarded.
impl<T: HasFallibleSpan> IntoSpanned for T {
    type Output = T;

    fn into_spanned(self, _span: impl Into<Span>) -> Self::Output {
        self
    }
}
pub trait HasSpan {
2019-10-28 15:46:50 +01:00
fn span ( & self ) -> Span ;
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl < T , E > HasSpan for Result < T , E >
where
T : HasSpan ,
{
fn span ( & self ) -> Span {
match self {
Result ::Ok ( val ) = > val . span ( ) ,
Result ::Err ( _ ) = > Span ::unknown ( ) ,
}
}
}
/// A `Spanned<T>` reports the span it carries.
impl<T> HasSpan for Spanned<T> {
    fn span(&self) -> Span {
        self.span
    }
}
pub trait HasFallibleSpan {
2019-10-28 15:46:50 +01:00
fn maybe_span ( & self ) -> Option < Span > ;
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl HasFallibleSpan for bool {
2019-10-28 15:46:50 +01:00
fn maybe_span ( & self ) -> Option < Span > {
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
None
}
}
/// The unit type carries no source location.
impl HasFallibleSpan for () {
    fn maybe_span(&self) -> Option<Span> {
        None
    }
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl < T > HasFallibleSpan for T
2019-10-28 15:46:50 +01:00
where
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
T : HasSpan ,
2019-10-28 15:46:50 +01:00
{
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
fn maybe_span ( & self ) -> Option < Span > {
Some ( HasSpan ::span ( self ) )
2019-10-28 15:46:50 +01:00
}
}
2019-11-21 15:33:14 +01:00
impl PrettyDebugWithSource for Option < Span > {
fn pretty_debug ( & self , source : & str ) -> DebugDocBuilder {
match self {
2021-01-29 14:43:35 +01:00
None = > DbgDocBldr ::description ( " no span " ) ,
2019-11-21 15:33:14 +01:00
Some ( span ) = > span . pretty_debug ( source ) ,
}
2019-10-28 15:46:50 +01:00
}
}
2019-11-21 15:33:14 +01:00
/// An `Option<Span>` is already its own fallible span.
impl HasFallibleSpan for Option<Span> {
    fn maybe_span(&self) -> Option<Span> {
        self.as_ref().copied()
    }
}
2019-11-21 15:33:14 +01:00
impl PrettyDebugWithSource for Span {
fn pretty_debug ( & self , source : & str ) -> DebugDocBuilder {
2021-01-29 14:43:35 +01:00
DbgDocBldr ::typed (
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
" span " ,
2021-01-29 14:43:35 +01:00
DbgDocBldr ::keyword ( " for " )
+ DbgDocBldr ::space ( )
+ DbgDocBldr ::description ( format! ( " {:?} " , self . slice ( source ) ) ) ,
2019-11-21 15:33:14 +01:00
)
2019-10-28 15:46:50 +01:00
}
}
/// A Span's span is itself.
impl HasSpan for Span {
    fn span(&self) -> Span {
        *self
    }
}
2019-11-21 15:33:14 +01:00
impl < T > PrettyDebugWithSource for Option < Spanned < T > >
2019-10-28 15:46:50 +01:00
where
2019-11-21 15:33:14 +01:00
Spanned < T > : PrettyDebugWithSource ,
2019-10-28 15:46:50 +01:00
{
2019-11-21 15:33:14 +01:00
fn pretty_debug ( & self , source : & str ) -> DebugDocBuilder {
2019-10-28 15:46:50 +01:00
match self {
2021-01-29 14:43:35 +01:00
None = > DbgDocBldr ::description ( " nothing " ) ,
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
Some ( v ) = > v . pretty_debug ( v . span . slice ( source ) ) ,
2019-10-28 15:46:50 +01:00
}
}
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl < T > HasFallibleSpan for Option < Spanned < T > > {
2019-10-28 15:46:50 +01:00
fn maybe_span ( & self ) -> Option < Span > {
match self {
None = > None ,
Some ( value ) = > Some ( value . span ) ,
}
}
}
2019-11-21 15:33:14 +01:00
impl < T > PrettyDebugWithSource for Option < Tagged < T > >
2019-10-28 15:46:50 +01:00
where
2019-11-21 15:33:14 +01:00
Tagged < T > : PrettyDebugWithSource ,
2019-10-28 15:46:50 +01:00
{
2019-11-21 15:33:14 +01:00
fn pretty_debug ( & self , source : & str ) -> DebugDocBuilder {
2019-10-28 15:46:50 +01:00
match self {
2021-01-29 14:43:35 +01:00
None = > DbgDocBldr ::description ( " nothing " ) ,
2019-11-21 15:33:14 +01:00
Some ( d ) = > d . pretty_debug ( source ) ,
2019-10-28 15:46:50 +01:00
}
}
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it results in a
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl < T > HasFallibleSpan for Option < Tagged < T > > {
2019-10-28 15:46:50 +01:00
fn maybe_span ( & self ) -> Option < Span > {
match self {
None = > None ,
Some ( value ) = > Some ( value . tag . span ) ,
}
}
}
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it results in a
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
impl < T > HasSpan for Tagged < T > {
2019-10-28 15:46:50 +01:00
fn span ( & self ) -> Span {
self . tag . span
}
}