2020-04-13 09:59:57 +02:00
|
|
|
use std::cmp::{Ord, Ordering, PartialOrd};
|
2020-05-20 19:31:04 +02:00
|
|
|
use std::convert::From;
|
2020-04-13 09:59:57 +02:00
|
|
|
use std::hash::{Hash, Hasher};
|
2020-04-06 09:16:14 +02:00
|
|
|
use std::path::PathBuf;
|
|
|
|
|
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
use crate::{hir, Primitive, UntaggedValue};
|
|
|
|
use crate::{PathMember, ShellTypeName};
|
2019-06-22 03:36:57 +02:00
|
|
|
use derive_new::new;
|
2020-04-06 09:16:14 +02:00
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
use nu_errors::ParseError;
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 03:30:48 +01:00
|
|
|
use nu_source::{
|
2020-04-06 09:16:14 +02:00
|
|
|
b, DebugDocBuilder, HasSpan, PrettyDebug, PrettyDebugRefineKind, PrettyDebugWithSource,
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 03:30:48 +01:00
|
|
|
};
|
2020-04-13 09:59:57 +02:00
|
|
|
use nu_source::{IntoSpanned, Span, Spanned, SpannedItem, Tag};
|
2020-04-06 09:16:14 +02:00
|
|
|
|
|
|
|
use bigdecimal::BigDecimal;
|
|
|
|
use indexmap::IndexMap;
|
|
|
|
use log::trace;
|
2020-07-10 19:48:11 +02:00
|
|
|
use num_bigint::{BigInt, ToBigInt};
|
2020-04-06 09:16:14 +02:00
|
|
|
use num_traits::identities::Zero;
|
2020-07-10 19:48:11 +02:00
|
|
|
use num_traits::{FromPrimitive, ToPrimitive};
|
2020-04-06 09:16:14 +02:00
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// A fully classified invocation of an internal (built-in) command: the
/// command's name, the span where the name appeared in the source, and the
/// parsed argument list.
pub struct InternalCommand {
    // Name of the internal command (e.g. "each").
    pub name: String,
    // Span of the command name in the original source.
    pub name_span: Span,
    // Parsed call: head expression plus positional/named arguments.
    pub args: crate::hir::Call,
}
|
|
|
|
|
|
|
|
impl InternalCommand {
|
|
|
|
pub fn new(name: String, name_span: Span, full_span: Span) -> InternalCommand {
|
|
|
|
InternalCommand {
|
2020-05-04 10:44:33 +02:00
|
|
|
name,
|
2020-04-13 09:59:57 +02:00
|
|
|
name_span,
|
|
|
|
args: crate::hir::Call::new(
|
2020-05-04 10:44:33 +02:00
|
|
|
Box::new(SpannedExpression::new(
|
|
|
|
Expression::Command(name_span),
|
|
|
|
name_span,
|
|
|
|
)),
|
2020-04-13 09:59:57 +02:00
|
|
|
full_span,
|
|
|
|
),
|
|
|
|
}
|
|
|
|
}
|
2020-06-27 07:38:19 +02:00
|
|
|
|
|
|
|
pub fn expand_it_usage(&mut self) {
|
|
|
|
if let Some(positionals) = &mut self.args.positional {
|
|
|
|
for arg in positionals {
|
|
|
|
arg.expand_it_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// The result of classifying a whole block: the block itself plus any parse
/// error encountered along the way.
pub struct ClassifiedBlock {
    pub block: Block,
    // this is not a Result to make it crystal clear that these shapes
    // aren't intended to be used directly with `?`
    pub failed: Option<ParseError>,
}
|
|
|
|
|
2020-04-20 08:41:51 +02:00
|
|
|
impl ClassifiedBlock {
    /// Pair a parsed block with the error (if any) produced while classifying it.
    pub fn new(block: Block, failed: Option<ParseError>) -> ClassifiedBlock {
        ClassifiedBlock { block, failed }
    }
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// A single classified pipeline: the ordered commands it contains.
pub struct ClassifiedPipeline {
    pub commands: Commands,
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
impl ClassifiedPipeline {
    /// Wrap a command list as a classified pipeline.
    pub fn new(commands: Commands) -> ClassifiedPipeline {
        ClassifiedPipeline { commands }
    }
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// One classified stage of a pipeline.
pub enum ClassifiedCommand {
    // A bare expression evaluated as a command stage.
    Expr(Box<SpannedExpression>),
    #[allow(unused)]
    // A dynamically dispatched call (currently unused).
    Dynamic(crate::hir::Call),
    // A call to a known internal (built-in) command.
    Internal(InternalCommand),
    // A stage that failed classification, carrying the parse error.
    Error(ParseError),
}
|
|
|
|
|
2020-04-27 04:04:54 +02:00
|
|
|
impl ClassifiedCommand {
|
|
|
|
pub fn has_it_iteration(&self) -> bool {
|
|
|
|
match self {
|
|
|
|
ClassifiedCommand::Internal(command) => {
|
|
|
|
let mut result = command.args.head.has_shallow_it_usage();
|
|
|
|
|
|
|
|
if let Some(positionals) = &command.args.positional {
|
|
|
|
for arg in positionals {
|
|
|
|
result = result || arg.has_shallow_it_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-12 05:58:16 +02:00
|
|
|
if let Some(named) = &command.args.named {
|
|
|
|
for arg in named.iter() {
|
|
|
|
if let NamedValue::Value(_, value) = arg.1 {
|
|
|
|
result = result || value.has_shallow_it_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-27 04:04:54 +02:00
|
|
|
result
|
|
|
|
}
|
|
|
|
ClassifiedCommand::Expr(expr) => expr.has_shallow_it_usage(),
|
|
|
|
_ => false,
|
|
|
|
}
|
|
|
|
}
|
2020-04-29 09:51:46 +02:00
|
|
|
|
|
|
|
pub fn expand_it_usage(&mut self) {
|
|
|
|
match self {
|
2020-06-27 07:38:19 +02:00
|
|
|
ClassifiedCommand::Internal(command) => command.expand_it_usage(),
|
|
|
|
ClassifiedCommand::Expr(expr) => expr.expand_it_usage(),
|
2020-04-29 09:51:46 +02:00
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// One pipeline: an ordered list of classified commands and the source span
/// that covers the whole pipeline.
pub struct Commands {
    pub list: Vec<ClassifiedCommand>,
    pub span: Span,
}
|
|
|
|
|
|
|
|
impl Commands {
|
|
|
|
pub fn new(span: Span) -> Commands {
|
|
|
|
Commands { list: vec![], span }
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn push(&mut self, command: ClassifiedCommand) {
|
|
|
|
self.list.push(command);
|
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
|
|
|
|
/// Convert all shallow uses of $it to `each { use of $it }`, converting each to a per-row command
|
|
|
|
pub fn expand_it_usage(&mut self) {
|
|
|
|
for idx in 0..self.list.len() {
|
2020-04-29 09:51:46 +02:00
|
|
|
self.list[idx].expand_it_usage();
|
|
|
|
}
|
|
|
|
for idx in 1..self.list.len() {
|
2020-04-27 04:04:54 +02:00
|
|
|
if self.list[idx].has_it_iteration() {
|
|
|
|
self.list[idx] = ClassifiedCommand::Internal(InternalCommand {
|
|
|
|
name: "each".to_string(),
|
|
|
|
name_span: self.span,
|
|
|
|
args: hir::Call {
|
|
|
|
head: Box::new(SpannedExpression {
|
2020-04-29 21:09:14 +02:00
|
|
|
expr: Expression::Synthetic(Synthetic::String(
|
|
|
|
"expanded-each".to_string(),
|
|
|
|
)),
|
2020-04-27 04:04:54 +02:00
|
|
|
span: self.span,
|
|
|
|
}),
|
|
|
|
named: None,
|
|
|
|
span: self.span,
|
|
|
|
positional: Some(vec![SpannedExpression {
|
|
|
|
expr: Expression::Block(Block {
|
|
|
|
block: vec![Commands {
|
|
|
|
list: vec![self.list[idx].clone()],
|
|
|
|
span: self.span,
|
|
|
|
}],
|
|
|
|
span: self.span,
|
|
|
|
}),
|
|
|
|
span: self.span,
|
|
|
|
}]),
|
|
|
|
is_last: false, // FIXME
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
|
2020-04-20 08:41:51 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// A block of pipelines (e.g. the body of `{ ... }`), plus its source span.
pub struct Block {
    pub block: Vec<Commands>,
    pub span: Span,
}
|
|
|
|
|
|
|
|
impl Block {
|
|
|
|
pub fn new(span: Span) -> Block {
|
|
|
|
Block {
|
|
|
|
block: vec![],
|
|
|
|
span,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn push(&mut self, commands: Commands) {
|
|
|
|
self.block.push(commands);
|
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
|
|
|
|
/// Convert all shallow uses of $it to `each { use of $it }`, converting each to a per-row command
|
|
|
|
pub fn expand_it_usage(&mut self) {
|
|
|
|
for commands in &mut self.block {
|
|
|
|
commands.expand_it_usage();
|
|
|
|
}
|
|
|
|
}
|
2020-06-27 23:04:57 +02:00
|
|
|
|
|
|
|
pub fn set_is_last(&mut self, is_last: bool) {
|
|
|
|
if let Some(pipeline) = self.block.last_mut() {
|
|
|
|
if let Some(command) = pipeline.list.last_mut() {
|
|
|
|
if let ClassifiedCommand::Internal(internal) = command {
|
|
|
|
internal.args.is_last = is_last;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get_is_last(&mut self) -> Option<bool> {
|
|
|
|
if let Some(pipeline) = self.block.last_mut() {
|
|
|
|
if let Some(command) = pipeline.list.last_mut() {
|
|
|
|
if let ClassifiedCommand::Internal(internal) = command {
|
|
|
|
return Some(internal.args.is_last);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
None
|
|
|
|
}
|
2020-04-20 08:41:51 +02:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
/// An external (OS) command in string form: the program name plus its
/// arguments, each carrying its source span.
pub struct ExternalStringCommand {
    pub name: Spanned<String>,
    pub args: Vec<Spanned<String>>,
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-20 23:45:11 +02:00
|
|
|
impl ExternalArgs {
    /// Iterate over the argument expressions in order.
    pub fn iter(&self) -> impl Iterator<Item = &SpannedExpression> {
        self.list.iter()
    }
}
|
|
|
|
|
2020-04-20 23:45:11 +02:00
|
|
|
// Deref to a slice so callers can use slice methods (len, indexing, iteration)
// directly on ExternalArgs.
impl std::ops::Deref for ExternalArgs {
    type Target = [SpannedExpression];

    fn deref(&self) -> &[SpannedExpression] {
        &self.list
    }
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// The argument list of an external command, plus the span covering all of
/// the arguments.
pub struct ExternalArgs {
    pub list: Vec<SpannedExpression>,
    pub span: Span,
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
/// A classified external (OS) command: name, where the name appeared, and its
/// parsed arguments.
pub struct ExternalCommand {
    pub name: String,

    // Tag (span + anchor) of the command name in the source.
    pub name_tag: Tag,
    pub args: ExternalArgs,
}
|
|
|
|
|
|
|
|
impl ExternalCommand {
|
|
|
|
pub fn has_it_argument(&self) -> bool {
|
2020-04-20 23:45:11 +02:00
|
|
|
self.args.iter().any(|arg| match arg {
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Path(path),
|
|
|
|
..
|
2020-04-23 22:00:49 +02:00
|
|
|
} => {
|
|
|
|
let Path { head, .. } = &**path;
|
|
|
|
match head {
|
2020-04-20 23:45:11 +02:00
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Variable(Variable::It(_)),
|
|
|
|
..
|
|
|
|
} => true,
|
|
|
|
_ => false,
|
2020-04-23 22:00:49 +02:00
|
|
|
}
|
|
|
|
}
|
2020-04-20 23:45:11 +02:00
|
|
|
_ => false,
|
|
|
|
})
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl HasSpan for ExternalCommand {
    /// Full source span of the command: from the name through the last argument.
    fn span(&self) -> Span {
        self.name_tag.span.until(self.args.span)
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Copy, Deserialize, Serialize)]
/// A unit suffix attached to a numeric literal — either a filesize unit
/// (bytes and powers-of-1024 multiples) or a duration unit (nanoseconds up
/// to years).
pub enum Unit {
    // Filesize units
    Byte,
    Kilobyte,
    Megabyte,
    Gigabyte,
    Terabyte,
    Petabyte,

    // Duration units
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
    Minute,
    Hour,
    Day,
    Week,
    Month,
    Year,
}
|
2019-08-16 00:18:18 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
/// A single member in a column path, prior to conversion to a `PathMember`.
pub enum Member {
    // A quoted string member: outer span includes the quotes, inner does not.
    String(/* outer */ Span, /* inner */ Span),
    // An integer member (row index) and its span.
    Int(BigInt, Span),
    // An unquoted (bare) string member.
    Bare(Spanned<String>),
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl Member {
    /// Convert this member into a `PathMember` for column-path evaluation.
    ///
    /// NOTE(review): the `String` variant is not handled (its arm is commented
    /// out below, apparently because it needs access to the source text) and
    /// falls into the `unimplemented!` catch-all — confirm callers never pass
    /// `Member::String` here.
    pub fn to_path_member(&self) -> PathMember {
        match self {
            //Member::String(outer, inner) => PathMember::string(inner.slice(source), *outer),
            Member::Int(int, span) => PathMember::int(int.clone(), *span),
            Member::Bare(spanned_string) => {
                PathMember::string(spanned_string.item.clone(), spanned_string.span)
            }
            _ => unimplemented!("Need to finish to_path_member"),
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Member {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match self {
|
|
|
|
Member::String(outer, _) => b::value(outer.slice(source)),
|
|
|
|
Member::Int(int, _) => b::value(format!("{}", int)),
|
|
|
|
Member::Bare(span) => b::value(span.span.slice(source)),
|
2019-12-04 22:14:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl HasSpan for Member {
|
2019-12-04 22:14:52 +01:00
|
|
|
fn span(&self) -> Span {
|
2020-04-06 09:16:14 +02:00
|
|
|
match self {
|
|
|
|
Member::String(outer, ..) => *outer,
|
|
|
|
Member::Int(_, int) => *int,
|
|
|
|
Member::Bare(name) => name.span,
|
|
|
|
}
|
2019-12-04 22:14:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
/// An arbitrary-precision number: either an integer or a decimal.
pub enum Number {
    Int(BigInt),
    Decimal(BigDecimal),
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebug for Number {
    /// Render the number as a primitive value in debug output.
    fn pretty(&self) -> DebugDocBuilder {
        match self {
            Number::Int(int) => b::primitive(int),
            Number::Decimal(decimal) => b::primitive(decimal),
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
// Implement `From<$ty>` and `From<&$ty>` for Number for each listed primitive
// integer type, widening through BigInt (`BigInt::zero() + int` performs the
// conversion without a dedicated From impl on BigInt for every type).
macro_rules! primitive_int {
    ($($ty:ty)*) => {
        $(
            impl From<$ty> for Number {
                fn from(int: $ty) -> Number {
                    Number::Int(BigInt::zero() + int)
                }
            }

            impl From<&$ty> for Number {
                fn from(int: &$ty) -> Number {
                    Number::Int(BigInt::zero() + *int)
                }
            }
        )*
    }
}

primitive_int!(i8 u8 i16 u16 i32 u32 i64 u64 i128 u128);
|
|
|
|
|
|
|
|
// Implement `From<$ty>` and `From<&$ty>` for Number for each listed float
// type, converting through the named BigDecimal constructor (`from_f32` /
// `from_f64`). The constructor only fails for NaN/infinity, which is treated
// as an internal error here.
macro_rules! primitive_decimal {
    ($($ty:tt -> $from:tt),*) => {
        $(
            impl From<$ty> for Number {
                fn from(decimal: $ty) -> Number {
                    if let Some(num) = BigDecimal::$from(decimal) {
                        Number::Decimal(num)
                    } else {
                        unreachable!("Internal error: BigDecimal 'from' failed")
                    }
                }
            }

            impl From<&$ty> for Number {
                fn from(decimal: &$ty) -> Number {
                    if let Some(num) = BigDecimal::$from(*decimal) {
                        Number::Decimal(num)
                    } else {
                        unreachable!("Internal error: BigDecimal 'from' failed")
                    }
                }
            }
        )*
    }
}

primitive_decimal!(f32 -> from_f32, f64 -> from_f64);
|
2019-09-10 17:31:21 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl std::ops::Mul for Number {
    type Output = Number;

    /// Multiply two Numbers; any Decimal operand promotes the result to Decimal.
    fn mul(self, other: Number) -> Number {
        match (self, other) {
            (Number::Int(a), Number::Int(b)) => Number::Int(a * b),
            (Number::Int(a), Number::Decimal(b)) => Number::Decimal(BigDecimal::from(a) * b),
            (Number::Decimal(a), Number::Int(b)) => Number::Decimal(a * BigDecimal::from(b)),
            (Number::Decimal(a), Number::Decimal(b)) => Number::Decimal(a * b),
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
// For literals
|
|
|
|
impl std::ops::Mul<u32> for Number {
|
|
|
|
type Output = Number;
|
|
|
|
|
|
|
|
fn mul(self, other: u32) -> Number {
|
2019-06-30 08:14:40 +02:00
|
|
|
match self {
|
2020-04-06 09:16:14 +02:00
|
|
|
Number::Int(left) => Number::Int(left * (other as i64)),
|
|
|
|
Number::Decimal(left) => Number::Decimal(left * BigDecimal::from(other)),
|
2019-06-30 08:14:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-10 19:48:11 +02:00
|
|
|
impl ToBigInt for Number {
    /// Convert to a BigInt, truncating any decimal part.
    fn to_bigint(&self) -> Option<BigInt> {
        match self {
            Number::Int(int) => Some(int.clone()),
            // The BigDecimal to BigInt conversion always return Some().
            // FIXME: This conversion might not be want we want, it just remove the scale.
            Number::Decimal(decimal) => decimal.to_bigint(),
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebug for Unit {
    /// Render the unit as its keyword suffix (e.g. "KB", "ms") in debug output.
    fn pretty(&self) -> DebugDocBuilder {
        b::keyword(self.as_str())
    }
}
|
|
|
|
|
2020-05-30 15:25:39 +02:00
|
|
|
pub fn convert_number_to_u64(number: &Number) -> u64 {
|
2020-04-06 09:16:14 +02:00
|
|
|
match number {
|
|
|
|
Number::Int(big_int) => {
|
|
|
|
if let Some(x) = big_int.to_u64() {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
unreachable!("Internal error: convert_number_to_u64 given incompatible number")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Number::Decimal(big_decimal) => {
|
|
|
|
if let Some(x) = big_decimal.to_u64() {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
unreachable!("Internal error: convert_number_to_u64 given incompatible number")
|
|
|
|
}
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl Unit {
|
|
|
|
pub fn as_str(self) -> &'static str {
|
|
|
|
match self {
|
|
|
|
Unit::Byte => "B",
|
|
|
|
Unit::Kilobyte => "KB",
|
|
|
|
Unit::Megabyte => "MB",
|
|
|
|
Unit::Gigabyte => "GB",
|
|
|
|
Unit::Terabyte => "TB",
|
|
|
|
Unit::Petabyte => "PB",
|
2020-07-10 19:48:11 +02:00
|
|
|
Unit::Nanosecond => "ns",
|
|
|
|
Unit::Microsecond => "us",
|
|
|
|
Unit::Millisecond => "ms",
|
2020-04-06 09:16:14 +02:00
|
|
|
Unit::Second => "s",
|
|
|
|
Unit::Minute => "m",
|
|
|
|
Unit::Hour => "h",
|
|
|
|
Unit::Day => "d",
|
|
|
|
Unit::Week => "w",
|
|
|
|
Unit::Month => "M",
|
|
|
|
Unit::Year => "y",
|
|
|
|
}
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
pub fn compute(self, size: &Number) -> UntaggedValue {
|
|
|
|
let size = size.clone();
|
|
|
|
|
|
|
|
match self {
|
2020-07-11 04:17:37 +02:00
|
|
|
Unit::Byte => filesize(convert_number_to_u64(&size)),
|
|
|
|
Unit::Kilobyte => filesize(convert_number_to_u64(&size) * 1024),
|
|
|
|
Unit::Megabyte => filesize(convert_number_to_u64(&size) * 1024 * 1024),
|
|
|
|
Unit::Gigabyte => filesize(convert_number_to_u64(&size) * 1024 * 1024 * 1024),
|
|
|
|
Unit::Terabyte => filesize(convert_number_to_u64(&size) * 1024 * 1024 * 1024 * 1024),
|
2020-04-18 03:50:58 +02:00
|
|
|
Unit::Petabyte => {
|
2020-07-11 04:17:37 +02:00
|
|
|
filesize(convert_number_to_u64(&size) * 1024 * 1024 * 1024 * 1024 * 1024)
|
2020-04-18 03:50:58 +02:00
|
|
|
}
|
2020-07-10 19:48:11 +02:00
|
|
|
Unit::Nanosecond => duration(size.to_bigint().expect("Conversion should never fail.")),
|
|
|
|
Unit::Microsecond => {
|
|
|
|
duration(size.to_bigint().expect("Conversion should never fail.") * 1000)
|
|
|
|
}
|
|
|
|
Unit::Millisecond => {
|
|
|
|
duration(size.to_bigint().expect("Conversion should never fail.") * 1000 * 1000)
|
|
|
|
}
|
|
|
|
Unit::Second => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.") * 1000 * 1000 * 1000,
|
|
|
|
),
|
|
|
|
Unit::Minute => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.") * 60 * 1000 * 1000 * 1000,
|
|
|
|
),
|
|
|
|
Unit::Hour => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
Unit::Day => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
Unit::Week => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 7
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
// FIXME: Number of days per month should not always be 30.
|
|
|
|
Unit::Month => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 30
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
// FIXME: Number of days per year should not be 365.
|
|
|
|
Unit::Year => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 365
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
}
|
|
|
|
|
2020-07-11 04:17:37 +02:00
|
|
|
/// Wrap a byte count as a `Filesize` primitive value.
pub fn filesize(size_in_bytes: u64) -> UntaggedValue {
    UntaggedValue::Primitive(Primitive::Filesize(size_in_bytes))
}
|
|
|
|
|
2020-07-10 19:48:11 +02:00
|
|
|
/// Wrap a nanosecond count as a `Duration` primitive value.
pub fn duration(nanos: BigInt) -> UntaggedValue {
    UntaggedValue::Primitive(Primitive::Duration(nanos))
}
|
|
|
|
|
|
|
|
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
/// An expression paired with the source span it was parsed from.
pub struct SpannedExpression {
    pub expr: Expression,
    pub span: Span,
}
|
|
|
|
|
2020-02-09 03:24:33 +01:00
|
|
|
impl SpannedExpression {
|
|
|
|
pub fn new(expr: Expression, span: Span) -> SpannedExpression {
|
|
|
|
SpannedExpression { expr, span }
|
|
|
|
}
|
2020-04-18 03:50:58 +02:00
|
|
|
|
|
|
|
pub fn precedence(&self) -> usize {
|
|
|
|
match self.expr {
|
|
|
|
Expression::Literal(Literal::Operator(operator)) => {
|
|
|
|
// Higher precedence binds tighter
|
|
|
|
|
|
|
|
match operator {
|
|
|
|
Operator::Multiply | Operator::Divide => 100,
|
|
|
|
Operator::Plus | Operator::Minus => 90,
|
|
|
|
Operator::NotContains
|
|
|
|
| Operator::Contains
|
|
|
|
| Operator::LessThan
|
|
|
|
| Operator::LessThanOrEqual
|
|
|
|
| Operator::GreaterThan
|
|
|
|
| Operator::GreaterThanOrEqual
|
|
|
|
| Operator::Equal
|
|
|
|
| Operator::NotEqual
|
2020-04-26 07:32:17 +02:00
|
|
|
| Operator::In
|
|
|
|
| Operator::NotIn => 80,
|
2020-04-18 03:50:58 +02:00
|
|
|
Operator::And => 50,
|
|
|
|
Operator::Or => 40, // TODO: should we have And and Or be different precedence?
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => 0,
|
|
|
|
}
|
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
|
|
|
|
pub fn has_shallow_it_usage(&self) -> bool {
|
|
|
|
match &self.expr {
|
|
|
|
Expression::Binary(binary) => {
|
|
|
|
binary.left.has_shallow_it_usage() || binary.right.has_shallow_it_usage()
|
|
|
|
}
|
|
|
|
Expression::Variable(Variable::It(_)) => true,
|
|
|
|
Expression::Path(path) => path.head.has_shallow_it_usage(),
|
2020-05-27 10:29:05 +02:00
|
|
|
Expression::List(list) => {
|
|
|
|
for l in list {
|
|
|
|
if l.has_shallow_it_usage() {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
2020-05-16 05:18:24 +02:00
|
|
|
Expression::Invocation(block) => {
|
|
|
|
for commands in block.block.iter() {
|
|
|
|
for command in commands.list.iter() {
|
|
|
|
if command.has_it_iteration() {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
_ => false,
|
|
|
|
}
|
|
|
|
}
|
2020-06-27 07:38:19 +02:00
|
|
|
|
|
|
|
pub fn expand_it_usage(&mut self) {
|
|
|
|
match self {
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Block(block),
|
|
|
|
..
|
|
|
|
} => {
|
|
|
|
block.expand_it_usage();
|
|
|
|
}
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Invocation(block),
|
|
|
|
..
|
|
|
|
} => {
|
|
|
|
block.expand_it_usage();
|
|
|
|
}
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::List(list),
|
|
|
|
..
|
|
|
|
} => {
|
|
|
|
for item in list.iter_mut() {
|
|
|
|
item.expand_it_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Path(path),
|
|
|
|
..
|
|
|
|
} => {
|
|
|
|
if let SpannedExpression {
|
|
|
|
expr: Expression::Invocation(block),
|
|
|
|
..
|
|
|
|
} = &mut path.head
|
|
|
|
{
|
|
|
|
block.expand_it_usage();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
}
|
2020-02-09 03:24:33 +01:00
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl std::ops::Deref for SpannedExpression {
|
|
|
|
type Target = Expression;
|
2019-11-21 15:33:14 +01:00
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
fn deref(&self) -> &Expression {
|
2019-11-21 15:33:14 +01:00
|
|
|
&self.expr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl HasSpan for SpannedExpression {
|
2019-11-21 15:33:14 +01:00
|
|
|
fn span(&self) -> Span {
|
|
|
|
self.span
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
/// The span carries no type information of its own, so the type name is
/// delegated to the wrapped expression.
impl ShellTypeName for SpannedExpression {
    fn type_name(&self) -> &'static str {
        self.expr.type_name()
    }
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for SpannedExpression {
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.refined_pretty_debug(refine, source),
|
|
|
|
PrettyDebugRefineKind::WithContext => match &self.expr {
|
|
|
|
Expression::Literal(literal) => literal
|
|
|
|
.clone()
|
|
|
|
.into_spanned(self.span)
|
|
|
|
.refined_pretty_debug(refine, source),
|
|
|
|
Expression::ExternalWord => {
|
|
|
|
b::delimit("e\"", b::primitive(self.span.slice(source)), "\"").group()
|
|
|
|
}
|
|
|
|
Expression::Synthetic(s) => match s {
|
|
|
|
Synthetic::String(_) => {
|
|
|
|
b::delimit("s\"", b::primitive(self.span.slice(source)), "\"").group()
|
|
|
|
}
|
|
|
|
},
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Variable(Variable::Other(_, _)) => b::keyword(self.span.slice(source)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Variable(Variable::It(_)) => b::keyword("$it"),
|
|
|
|
Expression::Binary(binary) => binary.pretty_debug(source),
|
|
|
|
Expression::Range(range) => range.pretty_debug(source),
|
|
|
|
Expression::Block(_) => b::opaque("block"),
|
2020-05-16 05:18:24 +02:00
|
|
|
Expression::Invocation(_) => b::opaque("invocation"),
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Garbage => b::opaque("garbage"),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::List(list) => b::delimit(
|
|
|
|
"[",
|
|
|
|
b::intersperse(
|
|
|
|
list.iter()
|
|
|
|
.map(|item| item.refined_pretty_debug(refine, source)),
|
|
|
|
b::space(),
|
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
|
|
|
Expression::Path(path) => path.pretty_debug(source),
|
|
|
|
Expression::FilePath(path) => b::typed("path", b::primitive(path.display())),
|
|
|
|
Expression::ExternalCommand(external) => {
|
2020-04-06 09:16:14 +02:00
|
|
|
b::keyword("^") + b::keyword(external.name.span.slice(source))
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
}
|
|
|
|
Expression::Command(command) => b::keyword(command.slice(source)),
|
|
|
|
Expression::Boolean(boolean) => match boolean {
|
|
|
|
true => b::primitive("$yes"),
|
|
|
|
false => b::primitive("$no"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 15:33:14 +01:00
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match &self.expr {
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Literal(literal) => {
|
|
|
|
literal.clone().into_spanned(self.span).pretty_debug(source)
|
|
|
|
}
|
|
|
|
Expression::ExternalWord => {
|
2019-11-21 15:33:14 +01:00
|
|
|
b::typed("external word", b::primitive(self.span.slice(source)))
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Synthetic(s) => match s {
|
2019-11-21 15:33:14 +01:00
|
|
|
Synthetic::String(s) => b::typed("synthetic", b::primitive(format!("{:?}", s))),
|
|
|
|
},
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Variable(Variable::Other(_, _)) => b::keyword(self.span.slice(source)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Variable(Variable::It(_)) => b::keyword("$it"),
|
|
|
|
Expression::Binary(binary) => binary.pretty_debug(source),
|
|
|
|
Expression::Range(range) => range.pretty_debug(source),
|
|
|
|
Expression::Block(_) => b::opaque("block"),
|
2020-05-16 05:18:24 +02:00
|
|
|
Expression::Invocation(_) => b::opaque("invocation"),
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Garbage => b::opaque("garbage"),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::List(list) => b::delimit(
|
2019-11-21 15:33:14 +01:00
|
|
|
"[",
|
|
|
|
b::intersperse(
|
|
|
|
list.iter().map(|item| item.pretty_debug(source)),
|
|
|
|
b::space(),
|
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Path(path) => path.pretty_debug(source),
|
|
|
|
Expression::FilePath(path) => b::typed("path", b::primitive(path.display())),
|
|
|
|
Expression::ExternalCommand(external) => b::typed(
|
|
|
|
"command",
|
2020-04-06 09:16:14 +02:00
|
|
|
b::keyword("^") + b::primitive(external.name.span.slice(source)),
|
2019-11-21 15:33:14 +01:00
|
|
|
),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Command(command) => {
|
2019-11-21 15:33:14 +01:00
|
|
|
b::typed("command", b::primitive(command.slice(source)))
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Boolean(boolean) => match boolean {
|
2019-11-21 15:33:14 +01:00
|
|
|
true => b::primitive("$yes"),
|
|
|
|
false => b::primitive("$no"),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A variable reference in the HIR, carrying the source span it was parsed from.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Variable {
    /// The special `$it` variable.
    It(Span),
    /// Any other named variable; the `String` is the variable's name.
    Other(String, Span),
}
|
2019-07-24 00:22:11 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// Binary operators usable in nu expressions.
///
/// `Copy` is derived since the enum is a bare discriminant; ordering/hashing
/// derives allow operators to be used as map keys and compared in tests.
#[derive(Debug, Clone, Copy, PartialOrd, Ord, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub enum Operator {
    // Comparison operators
    Equal,
    NotEqual,
    LessThan,
    GreaterThan,
    LessThanOrEqual,
    GreaterThanOrEqual,
    // Substring / pattern containment
    Contains,
    NotContains,
    // Arithmetic operators
    Plus,
    Minus,
    Multiply,
    Divide,
    // Collection membership
    In,
    NotIn,
    // Boolean connectives
    And,
    Or,
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A binary expression: `left op right`.
///
/// The operator is stored as a full `SpannedExpression` (not a bare
/// `Operator`) so its exact source span remains available for rendering
/// and diagnostics. `new` (from `derive_new`) generates a `Binary::new`
/// constructor taking the three fields in declaration order.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize, new)]
pub struct Binary {
    /// Left-hand operand.
    pub left: SpannedExpression,
    /// The operator, with its source span.
    pub op: SpannedExpression,
    /// Right-hand operand.
    pub right: SpannedExpression,
}
|
2019-11-04 16:47:03 +01:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Binary {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
b::delimit(
|
|
|
|
"<",
|
|
|
|
self.left.pretty_debug(source)
|
|
|
|
+ b::space()
|
|
|
|
+ b::keyword(self.op.span.slice(source))
|
|
|
|
+ b::space()
|
|
|
|
+ self.right.pretty_debug(source),
|
|
|
|
">",
|
|
|
|
)
|
|
|
|
.group()
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A value synthesized during expansion rather than taken verbatim from the
/// source text.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Synthetic {
    /// A synthesized string value.
    String(String),
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl ShellTypeName for Synthetic {
|
|
|
|
fn type_name(&self) -> &'static str {
|
|
|
|
match self {
|
|
|
|
Synthetic::String(_) => "string",
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A range expression `left..right`.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub struct Range {
    /// The starting endpoint of the range.
    pub left: SpannedExpression,
    /// Span of the `..` token between the endpoints, kept so the literal
    /// operator text can be recovered from the source.
    pub dotdot: Span,
    /// The ending endpoint of the range.
    pub right: SpannedExpression,
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Range {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
b::delimit(
|
|
|
|
"<",
|
|
|
|
self.left.pretty_debug(source)
|
|
|
|
+ b::space()
|
|
|
|
+ b::keyword(self.dotdot.slice(source))
|
|
|
|
+ b::space()
|
|
|
|
+ self.right.pretty_debug(source),
|
|
|
|
">",
|
|
|
|
)
|
|
|
|
.group()
|
2019-06-29 10:55:42 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// Literal values that can appear in HIR expressions.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Literal {
    /// A numeric literal.
    Number(Number),
    /// A number paired with a unit (e.g. `5KB`); both halves keep spans.
    Size(Spanned<Number>, Spanned<Unit>),
    /// An operator appearing in literal position.
    Operator(Operator),
    /// A string literal.
    String(String),
    /// A glob pattern such as `*.rs`.
    GlobPattern(String),
    /// A column path, e.g. `cpu."max ghz"`.
    ColumnPath(Vec<Member>),
    /// A bare (unquoted) word.
    Bare(String),
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl Literal {
|
|
|
|
pub fn into_spanned(self, span: impl Into<Span>) -> SpannedLiteral {
|
|
|
|
SpannedLiteral {
|
2019-11-21 15:33:14 +01:00
|
|
|
literal: self,
|
|
|
|
span: span.into(),
|
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
// NOTE(review): the list below (Eq, PartialEq, Ord, PartialOrd, Hash,
// Serialize, Deserialize) is commented out rather than derived — presumably
// deliberate or pending; confirm before adding these derives.
//, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize
/// A `Literal` paired with the source span it was parsed from.
#[derive(Debug, Clone)]
pub struct SpannedLiteral {
    /// The literal value.
    pub literal: Literal,
    /// Source span covering the literal in the original input.
    pub span: Span,
}
|
|
|
|
|
2019-11-21 15:33:14 +01:00
|
|
|
impl ShellTypeName for Literal {
|
|
|
|
fn type_name(&self) -> &'static str {
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
match &self {
|
|
|
|
Literal::Number(..) => "number",
|
|
|
|
Literal::Size(..) => "size",
|
|
|
|
Literal::String(..) => "string",
|
|
|
|
Literal::ColumnPath(..) => "column path",
|
2020-04-10 09:56:48 +02:00
|
|
|
Literal::Bare(_) => "string",
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Literal::GlobPattern(_) => "pattern",
|
2020-04-06 09:16:14 +02:00
|
|
|
Literal::Operator(_) => "operator",
|
2019-07-24 00:22:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl PrettyDebugWithSource for SpannedLiteral {
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
|
|
|
|
PrettyDebugRefineKind::WithContext => match &self.literal {
|
|
|
|
Literal::Number(number) => number.pretty(),
|
|
|
|
Literal::Size(number, unit) => (number.pretty() + unit.pretty()).group(),
|
2020-04-06 09:16:14 +02:00
|
|
|
Literal::String(string) => b::primitive(format!("{:?}", string)), //string.slice(source))),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Literal::GlobPattern(pattern) => b::primitive(pattern),
|
|
|
|
Literal::ColumnPath(path) => {
|
|
|
|
b::intersperse_with_source(path.iter(), b::space(), source)
|
|
|
|
}
|
2020-04-10 09:56:48 +02:00
|
|
|
Literal::Bare(bare) => b::delimit("b\"", b::primitive(bare), "\""),
|
2020-04-06 09:16:14 +02:00
|
|
|
Literal::Operator(operator) => b::primitive(format!("{:?}", operator)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 15:33:14 +01:00
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match &self.literal {
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Literal::Number(number) => number.pretty(),
|
|
|
|
Literal::Size(number, unit) => {
|
|
|
|
b::typed("size", (number.pretty() + unit.pretty()).group())
|
|
|
|
}
|
|
|
|
Literal::String(string) => b::typed(
|
|
|
|
"string",
|
2020-04-06 09:16:14 +02:00
|
|
|
b::primitive(format!("{:?}", string)), //string.slice(source))),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
),
|
|
|
|
Literal::GlobPattern(pattern) => b::typed("pattern", b::primitive(pattern)),
|
|
|
|
Literal::ColumnPath(path) => b::typed(
|
2019-11-21 15:33:14 +01:00
|
|
|
"column path",
|
|
|
|
b::intersperse_with_source(path.iter(), b::space(), source),
|
|
|
|
),
|
2020-04-10 09:56:48 +02:00
|
|
|
Literal::Bare(bare) => b::typed("bare", b::primitive(bare)),
|
2020-04-06 09:16:14 +02:00
|
|
|
Literal::Operator(operator) => {
|
|
|
|
b::typed("operator", b::primitive(format!("{:?}", operator)))
|
|
|
|
}
|
2019-06-30 08:14:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A path expression such as `$it.cpu."max ghz"`: a head expression followed
/// by a sequence of member accesses.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, new, Deserialize, Serialize)]
pub struct Path {
    // The expression the path starts from (e.g. a variable or invocation).
    pub head: SpannedExpression,
    // The member accesses applied to `head`, in order.
    pub tail: Vec<PathMember>,
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Path {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
self.head.pretty_debug(source)
|
|
|
|
+ b::operator(".")
|
|
|
|
+ b::intersperse(self.tail.iter().map(|m| m.pretty()), b::operator("."))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The high-level intermediate representation of a single expression.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Expression {
    /// A literal value (number, string, pattern, column path, …).
    Literal(Literal),
    /// A bare word destined for an external command.
    ExternalWord,
    /// An expression synthesized by the parser rather than written by the user.
    Synthetic(Synthetic),
    /// A variable reference such as `$it`.
    Variable(Variable),
    /// A binary operation (boxed to keep the enum small).
    Binary(Box<Binary>),
    /// A range expression (boxed to keep the enum small).
    Range(Box<Range>),
    /// A block of pipelines, e.g. `{ … }`.
    Block(hir::Block),
    /// A list literal of sub-expressions.
    List(Vec<SpannedExpression>),
    /// A member-access path rooted at a head expression.
    Path(Box<Path>),

    /// A filesystem path value.
    FilePath(PathBuf),
    /// An invocation of an external command with its arguments.
    ExternalCommand(ExternalStringCommand),
    /// A reference to a command by its source span.
    Command(Span),
    /// A command invocation used in expression position, e.g. `$(…)`.
    Invocation(hir::Block),

    /// A boolean literal.
    Boolean(bool),

    // Trying this approach out: if we let parsing always be infallible
    // we can use the same parse and just place bad token markers in the output
    // We can later throw an error if we try to process them further.
    Garbage,
}
|
|
|
|
|
|
|
|
impl ShellTypeName for Expression {
|
|
|
|
fn type_name(&self) -> &'static str {
|
|
|
|
match self {
|
|
|
|
Expression::Literal(literal) => literal.type_name(),
|
|
|
|
Expression::Synthetic(synthetic) => synthetic.type_name(),
|
|
|
|
Expression::Command(..) => "command",
|
|
|
|
Expression::ExternalWord => "external word",
|
|
|
|
Expression::FilePath(..) => "file path",
|
|
|
|
Expression::Variable(..) => "variable",
|
|
|
|
Expression::List(..) => "list",
|
|
|
|
Expression::Binary(..) => "binary",
|
|
|
|
Expression::Range(..) => "range",
|
|
|
|
Expression::Block(..) => "block",
|
2020-05-16 05:18:24 +02:00
|
|
|
Expression::Invocation(..) => "command invocation",
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Path(..) => "variable path",
|
|
|
|
Expression::Boolean(..) => "boolean",
|
|
|
|
Expression::ExternalCommand(..) => "external",
|
|
|
|
Expression::Garbage => "garbage",
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl IntoSpanned for Expression {
|
|
|
|
type Output = SpannedExpression;
|
|
|
|
|
|
|
|
fn into_spanned(self, span: impl Into<Span>) -> Self::Output {
|
|
|
|
SpannedExpression {
|
|
|
|
expr: self,
|
|
|
|
span: span.into(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Expression {
|
|
|
|
pub fn integer(i: i64) -> Expression {
|
|
|
|
Expression::Literal(Literal::Number(Number::Int(BigInt::from(i))))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn decimal(f: f64) -> Expression {
|
|
|
|
Expression::Literal(Literal::Number(Number::Decimal(BigDecimal::from(f))))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn string(s: String) -> Expression {
|
|
|
|
Expression::Literal(Literal::String(s))
|
|
|
|
}
|
|
|
|
|
2020-04-18 03:50:58 +02:00
|
|
|
pub fn operator(operator: Operator) -> Expression {
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Literal(Literal::Operator(operator))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn range(left: SpannedExpression, dotdot: Span, right: SpannedExpression) -> Expression {
|
|
|
|
Expression::Range(Box::new(Range {
|
|
|
|
left,
|
|
|
|
dotdot,
|
|
|
|
right,
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn pattern(p: String) -> Expression {
|
|
|
|
Expression::Literal(Literal::GlobPattern(p))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn file_path(file_path: PathBuf) -> Expression {
|
|
|
|
Expression::FilePath(file_path)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn simple_column_path(members: Vec<Member>) -> Expression {
|
|
|
|
Expression::Literal(Literal::ColumnPath(members))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn path(head: SpannedExpression, tail: Vec<impl Into<PathMember>>) -> Expression {
|
|
|
|
let tail = tail.into_iter().map(|t| t.into()).collect();
|
|
|
|
Expression::Path(Box::new(Path::new(head, tail)))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn unit(i: Spanned<i64>, unit: Spanned<Unit>) -> Expression {
|
|
|
|
Expression::Literal(Literal::Size(
|
|
|
|
Number::Int(BigInt::from(i.item)).spanned(i.span),
|
|
|
|
unit,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn variable(v: String, span: Span) -> Expression {
|
|
|
|
if v == "$it" {
|
|
|
|
Expression::Variable(Variable::It(span))
|
|
|
|
} else {
|
|
|
|
Expression::Variable(Variable::Other(v, span))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// The value bound to a named flag in a command call.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub enum NamedValue {
    /// A boolean switch the user did not supply.
    AbsentSwitch,
    /// A boolean switch the user supplied; the span points at the flag token.
    PresentSwitch(Span),
    /// A value-taking flag the user did not supply.
    AbsentValue,
    /// A value-taking flag with its flag span and the supplied expression.
    Value(Span, SpannedExpression),
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for NamedValue {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match self {
|
|
|
|
NamedValue::AbsentSwitch => b::typed("switch", b::description("absent")),
|
|
|
|
NamedValue::PresentSwitch(_) => b::typed("switch", b::description("present")),
|
|
|
|
NamedValue::AbsentValue => b::description("absent"),
|
|
|
|
NamedValue::Value(_, value) => value.pretty_debug(source),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
|
|
|
|
PrettyDebugRefineKind::WithContext => match self {
|
|
|
|
NamedValue::AbsentSwitch => b::value("absent"),
|
|
|
|
NamedValue::PresentSwitch(_) => b::value("present"),
|
|
|
|
NamedValue::AbsentValue => b::value("absent"),
|
|
|
|
NamedValue::Value(_, value) => value.refined_pretty_debug(refine, source),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// A parsed command call: the command head plus its arguments.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Call {
    // The expression naming the command being invoked.
    pub head: Box<SpannedExpression>,
    // Positional arguments, if any were supplied.
    pub positional: Option<Vec<SpannedExpression>>,
    // Named (flag) arguments, if any were supplied.
    pub named: Option<NamedArguments>,
    // The span of the whole call in the source.
    pub span: Span,
    // True when this call is the last stage of its pipeline.
    pub is_last: bool,
}
|
|
|
|
|
|
|
|
impl Call {
|
|
|
|
pub fn switch_preset(&self, switch: &str) -> bool {
|
|
|
|
self.named
|
|
|
|
.as_ref()
|
|
|
|
.map(|n| n.switch_present(switch))
|
|
|
|
.unwrap_or(false)
|
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
pub fn set_initial_flags(&mut self, signature: &crate::Signature) {
|
2020-04-06 09:16:14 +02:00
|
|
|
for (named, value) in signature.named.iter() {
|
|
|
|
if self.named.is_none() {
|
|
|
|
self.named = Some(NamedArguments::new());
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(ref mut args) = self.named {
|
|
|
|
match value.0 {
|
2020-04-13 09:59:57 +02:00
|
|
|
crate::NamedType::Switch(_) => args.insert_switch(named, None),
|
2020-04-06 09:16:14 +02:00
|
|
|
_ => args.insert_optional(named, Span::new(0, 0), None),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Call {
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
|
|
|
|
PrettyDebugRefineKind::WithContext => {
|
|
|
|
self.head
|
|
|
|
.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
|
|
|
|
+ b::preceded_option(
|
|
|
|
Some(b::space()),
|
|
|
|
self.positional.as_ref().map(|pos| {
|
|
|
|
b::intersperse(
|
|
|
|
pos.iter().map(|expr| {
|
|
|
|
expr.refined_pretty_debug(
|
|
|
|
PrettyDebugRefineKind::WithContext,
|
|
|
|
source,
|
|
|
|
)
|
|
|
|
}),
|
|
|
|
b::space(),
|
|
|
|
)
|
|
|
|
}),
|
|
|
|
)
|
|
|
|
+ b::preceded_option(
|
|
|
|
Some(b::space()),
|
|
|
|
self.named.as_ref().map(|named| {
|
|
|
|
named.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
|
|
|
|
}),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
b::typed(
|
|
|
|
"call",
|
|
|
|
self.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Call {
|
|
|
|
pub fn new(head: Box<SpannedExpression>, span: Span) -> Call {
|
|
|
|
Call {
|
|
|
|
head,
|
|
|
|
positional: None,
|
|
|
|
named: None,
|
|
|
|
span,
|
Move external closer to internal (#1611)
* Refactor InputStream and affected commands.
First, making `values` private and leaning on the `Stream` implementation makes
consumes of `InputStream` less likely to have to change in the future, if we
change what an `InputStream` is internally.
Second, we're dropping `Option<InputStream>` as the input to pipelines,
internals, and externals. Instead, `InputStream.is_empty` can be used to check
for "emptiness". Empty streams are typically only ever used as the first input
to a pipeline.
* Add run_external internal command.
We want to push external commands closer to internal commands, eventually
eliminating the concept of "external" completely. This means we can consolidate
a couple of things:
- Variable evaluation (for example, `$it`, `$nu`, alias vars)
- Behaviour of whole stream vs per-item external execution
It should also make it easier for us to start introducing argument signatures
for external commands,
* Update run_external.rs
* Update run_external.rs
* Update run_external.rs
* Update run_external.rs
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
2020-04-20 05:30:44 +02:00
|
|
|
is_last: false,
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The kind of paired delimiter a token opens or closes.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Delimiter {
    /// `(` / `)`
    Paren,
    /// `{` / `}`
    Brace,
    /// `[` / `]`
    Square,
}
|
|
|
|
|
|
|
|
/// A flattened token classification, used for syntax coloring.
#[derive(Debug, Copy, Clone)]
pub enum FlatShape {
    /// An opening delimiter of the given kind.
    OpenDelimiter(Delimiter),
    /// A closing delimiter of the given kind.
    CloseDelimiter(Delimiter),
    Type,
    Identifier,
    /// The special `$it` variable.
    ItVariable,
    /// Any other variable reference.
    Variable,
    Operator,
    /// A `.` member-access token.
    Dot,
    /// A `..` range token.
    DotDot,
    InternalCommand,
    ExternalCommand,
    ExternalWord,
    /// A bare column-path member.
    BareMember,
    /// A quoted column-path member.
    StringMember,
    String,
    Path,
    Word,
    Keyword,
    Pipe,
    GlobPattern,
    /// A longhand flag (`--flag`).
    Flag,
    /// A shorthand flag (`-f`).
    ShorthandFlag,
    Int,
    Decimal,
    /// An unparseable region of source.
    Garbage,
    Whitespace,
    Separator,
    Comment,
    /// A size literal, carrying the spans of its number and unit parts.
    Size { number: Span, unit: Span },
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// The named (flag) arguments of a call, keyed by flag name.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct NamedArguments {
    // Flag name -> bound value. An IndexMap, so insertion order is preserved.
    pub named: IndexMap<String, NamedValue>,
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[allow(clippy::derive_hash_xor_eq)]
impl Hash for NamedArguments {
    /// Create the hash function to allow the Hash trait for dictionaries
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Clone and sort by key so the hash does not depend on insertion
        // order (the underlying map is insertion-ordered). Keys and values
        // are hashed as two separate sequences.
        let mut entries = self.named.clone();
        entries.sort_keys();
        entries.keys().collect::<Vec<&String>>().hash(state);
        entries.values().collect::<Vec<&NamedValue>>().hash(state);
    }
}
|
|
|
|
|
|
|
|
impl PartialOrd for NamedArguments {
|
|
|
|
/// Compare two dictionaries for sort ordering
|
|
|
|
fn partial_cmp(&self, other: &NamedArguments) -> Option<Ordering> {
|
|
|
|
let this: Vec<&String> = self.named.keys().collect();
|
|
|
|
let that: Vec<&String> = other.named.keys().collect();
|
|
|
|
|
|
|
|
if this != that {
|
|
|
|
return this.partial_cmp(&that);
|
|
|
|
}
|
|
|
|
|
|
|
|
let this: Vec<&NamedValue> = self.named.values().collect();
|
|
|
|
let that: Vec<&NamedValue> = self.named.values().collect();
|
|
|
|
|
|
|
|
this.partial_cmp(&that)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Ord for NamedArguments {
|
|
|
|
/// Compare two dictionaries for ordering
|
|
|
|
fn cmp(&self, other: &NamedArguments) -> Ordering {
|
|
|
|
let this: Vec<&String> = self.named.keys().collect();
|
|
|
|
let that: Vec<&String> = other.named.keys().collect();
|
|
|
|
|
|
|
|
if this != that {
|
|
|
|
return this.cmp(&that);
|
|
|
|
}
|
|
|
|
|
|
|
|
let this: Vec<&NamedValue> = self.named.values().collect();
|
|
|
|
let that: Vec<&NamedValue> = self.named.values().collect();
|
|
|
|
|
|
|
|
this.cmp(&that)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl NamedArguments {
|
|
|
|
pub fn new() -> NamedArguments {
|
|
|
|
Default::default()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn iter(&self) -> impl Iterator<Item = (&String, &NamedValue)> {
|
|
|
|
self.named.iter()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get(&self, name: &str) -> Option<&NamedValue> {
|
|
|
|
self.named.get(name)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_empty(&self) -> bool {
|
|
|
|
self.named.is_empty()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl NamedArguments {
|
|
|
|
pub fn insert_switch(&mut self, name: impl Into<String>, switch: Option<Flag>) {
|
|
|
|
let name = name.into();
|
|
|
|
trace!("Inserting switch -- {} = {:?}", name, switch);
|
|
|
|
|
|
|
|
match switch {
|
|
|
|
None => self.named.insert(name, NamedValue::AbsentSwitch),
|
|
|
|
Some(flag) => self
|
|
|
|
.named
|
|
|
|
.insert(name, NamedValue::PresentSwitch(flag.name)),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn insert_optional(
|
|
|
|
&mut self,
|
|
|
|
name: impl Into<String>,
|
|
|
|
flag_span: Span,
|
|
|
|
expr: Option<SpannedExpression>,
|
|
|
|
) {
|
|
|
|
match expr {
|
|
|
|
None => self.named.insert(name.into(), NamedValue::AbsentValue),
|
|
|
|
Some(expr) => self
|
|
|
|
.named
|
|
|
|
.insert(name.into(), NamedValue::Value(flag_span, expr)),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn insert_mandatory(
|
|
|
|
&mut self,
|
|
|
|
name: impl Into<String>,
|
|
|
|
flag_span: Span,
|
|
|
|
expr: SpannedExpression,
|
|
|
|
) {
|
|
|
|
self.named
|
|
|
|
.insert(name.into(), NamedValue::Value(flag_span, expr));
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn switch_present(&self, switch: &str) -> bool {
|
|
|
|
self.named
|
|
|
|
.get(switch)
|
|
|
|
.map(|t| match t {
|
|
|
|
NamedValue::PresentSwitch(_) => true,
|
|
|
|
_ => false,
|
|
|
|
})
|
|
|
|
.unwrap_or(false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for NamedArguments {
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
|
|
|
|
PrettyDebugRefineKind::WithContext => b::intersperse(
|
|
|
|
self.named.iter().map(|(key, value)| {
|
|
|
|
b::key(key)
|
|
|
|
+ b::equals()
|
|
|
|
+ value.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
|
|
|
|
}),
|
|
|
|
b::space(),
|
|
|
|
),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
b::delimit(
|
|
|
|
"(",
|
|
|
|
self.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source),
|
|
|
|
")",
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Whether a flag was written in shorthand (`-f`) or longhand (`--flag`) form.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum FlagKind {
    Shorthand,
    Longhand,
}
|
|
|
|
|
|
|
|
/// A flag token as written in the source: its form plus the span of its name.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, new)]
pub struct Flag {
    // Shorthand (`-f`) or longhand (`--flag`).
    pub(crate) kind: FlagKind,
    // The span of the flag's name (without the dash prefix).
    pub(crate) name: Span,
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Flag {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
let prefix = match self.kind {
|
|
|
|
FlagKind::Longhand => b::description("--"),
|
|
|
|
FlagKind::Shorthand => b::description("-"),
|
|
|
|
};
|
|
|
|
|
|
|
|
prefix + b::description(self.name.slice(source))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Flag {
|
|
|
|
pub fn color(&self, span: impl Into<Span>) -> Spanned<FlatShape> {
|
|
|
|
match self.kind {
|
|
|
|
FlagKind::Longhand => FlatShape::Flag.spanned(span.into()),
|
|
|
|
FlagKind::Shorthand => FlatShape::ShorthandFlag.spanned(span.into()),
|
|
|
|
}
|
|
|
|
}
|
2019-06-22 03:36:57 +02:00
|
|
|
}
|