2020-04-13 09:59:57 +02:00
|
|
|
use std::cmp::{Ord, Ordering, PartialOrd};
|
2020-05-20 19:31:04 +02:00
|
|
|
use std::convert::From;
|
2020-04-13 09:59:57 +02:00
|
|
|
use std::hash::{Hash, Hasher};
|
2020-04-06 09:16:14 +02:00
|
|
|
use std::path::PathBuf;
|
|
|
|
|
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
|
2020-12-18 08:53:49 +01:00
|
|
|
use crate::Signature;
|
|
|
|
use crate::{hir, Dictionary, PositionalType, Primitive, SyntaxShape, UntaggedValue};
|
2020-04-13 09:59:57 +02:00
|
|
|
use crate::{PathMember, ShellTypeName};
|
2019-06-22 03:36:57 +02:00
|
|
|
use derive_new::new;
|
2020-04-06 09:16:14 +02:00
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
use nu_errors::ParseError;
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 03:30:48 +01:00
|
|
|
use nu_source::{
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr, DebugDocBuilder, HasSpan, PrettyDebug, PrettyDebugRefineKind, PrettyDebugWithSource,
|
Extract core stuff into own crates
This commit extracts five new crates:
- nu-source, which contains the core source-code handling logic in Nu,
including Text, Span, and also the pretty.rs-based debug logic
- nu-parser, which is the parser and expander logic
- nu-protocol, which is the bulk of the types and basic conveniences
used by plugins
- nu-errors, which contains ShellError, ParseError and error handling
conveniences
- nu-textview, which is the textview plugin extracted into a crate
One of the major consequences of this refactor is that it's no longer
possible to `impl X for Spanned<Y>` outside of the `nu-source` crate, so
a lot of types became more concrete (Value became a concrete type
instead of Spanned<Value>, for example).
This also turned a number of inherent methods in the main nu crate into
plain functions (impl Value {} became a bunch of functions in the
`value` namespace in `crate::data::value`).
2019-11-26 03:30:48 +01:00
|
|
|
};
|
2020-04-13 09:59:57 +02:00
|
|
|
use nu_source::{IntoSpanned, Span, Spanned, SpannedItem, Tag};
|
2020-04-06 09:16:14 +02:00
|
|
|
|
|
|
|
use bigdecimal::BigDecimal;
|
|
|
|
use indexmap::IndexMap;
|
|
|
|
use log::trace;
|
2020-07-10 19:48:11 +02:00
|
|
|
use num_bigint::{BigInt, ToBigInt};
|
2020-04-06 09:16:14 +02:00
|
|
|
use num_traits::identities::Zero;
|
2020-07-10 19:48:11 +02:00
|
|
|
use num_traits::{FromPrimitive, ToPrimitive};
|
2020-04-06 09:16:14 +02:00
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
|
|
|
pub struct InternalCommand {
|
|
|
|
pub name: String,
|
|
|
|
pub name_span: Span,
|
|
|
|
pub args: crate::hir::Call,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl InternalCommand {
|
|
|
|
pub fn new(name: String, name_span: Span, full_span: Span) -> InternalCommand {
|
|
|
|
InternalCommand {
|
2020-05-04 10:44:33 +02:00
|
|
|
name,
|
2020-04-13 09:59:57 +02:00
|
|
|
name_span,
|
|
|
|
args: crate::hir::Call::new(
|
2020-08-25 03:47:58 +02:00
|
|
|
Box::new(SpannedExpression::new(Expression::Command, name_span)),
|
2020-04-13 09:59:57 +02:00
|
|
|
full_span,
|
|
|
|
),
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.args.has_it_usage()
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
self.args.get_free_variables(known_variables)
|
|
|
|
}
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
2020-04-20 08:41:51 +02:00
|
|
|
pub struct ClassifiedBlock {
|
|
|
|
pub block: Block,
|
2020-04-13 09:59:57 +02:00
|
|
|
// this is not a Result to make it crystal clear that these shapes
|
|
|
|
// aren't intended to be used directly with `?`
|
|
|
|
pub failed: Option<ParseError>,
|
|
|
|
}
|
|
|
|
|
2020-04-20 08:41:51 +02:00
|
|
|
impl ClassifiedBlock {
|
|
|
|
pub fn new(block: Block, failed: Option<ParseError>) -> ClassifiedBlock {
|
|
|
|
ClassifiedBlock { block, failed }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
|
|
|
pub struct ClassifiedPipeline {
|
2020-12-18 08:53:49 +01:00
|
|
|
pub commands: Pipeline,
|
2020-04-20 08:41:51 +02:00
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
impl ClassifiedPipeline {
|
2020-12-18 08:53:49 +01:00
|
|
|
pub fn new(commands: Pipeline) -> ClassifiedPipeline {
|
2020-04-20 08:41:51 +02:00
|
|
|
ClassifiedPipeline { commands }
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
|
|
|
pub enum ClassifiedCommand {
|
|
|
|
Expr(Box<SpannedExpression>),
|
|
|
|
Dynamic(crate::hir::Call),
|
|
|
|
Internal(InternalCommand),
|
|
|
|
Error(ParseError),
|
|
|
|
}
|
|
|
|
|
2020-10-27 08:37:35 +01:00
|
|
|
impl ClassifiedCommand {
|
|
|
|
fn has_it_usage(&self) -> bool {
|
|
|
|
match self {
|
|
|
|
ClassifiedCommand::Expr(expr) => expr.has_it_usage(),
|
|
|
|
ClassifiedCommand::Dynamic(call) => call.has_it_usage(),
|
|
|
|
ClassifiedCommand::Internal(internal) => internal.has_it_usage(),
|
|
|
|
ClassifiedCommand::Error(_) => false,
|
|
|
|
}
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
match self {
|
|
|
|
ClassifiedCommand::Expr(expr) => expr.get_free_variables(known_variables),
|
|
|
|
ClassifiedCommand::Dynamic(call) => call.get_free_variables(known_variables),
|
|
|
|
ClassifiedCommand::Internal(internal) => internal.get_free_variables(known_variables),
|
|
|
|
_ => vec![],
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
2020-12-18 08:53:49 +01:00
|
|
|
pub struct Pipeline {
|
2020-04-13 09:59:57 +02:00
|
|
|
pub list: Vec<ClassifiedCommand>,
|
|
|
|
pub span: Span,
|
|
|
|
}
|
|
|
|
|
2020-12-18 08:53:49 +01:00
|
|
|
impl Pipeline {
|
|
|
|
pub fn new(span: Span) -> Pipeline {
|
|
|
|
Pipeline { list: vec![], span }
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn basic() -> Pipeline {
|
|
|
|
Pipeline {
|
|
|
|
list: vec![],
|
|
|
|
span: Span::unknown(),
|
|
|
|
}
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn push(&mut self, command: ClassifiedCommand) {
|
|
|
|
self.list.push(command);
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.list.iter().any(|cc| cc.has_it_usage())
|
|
|
|
}
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
|
2020-04-20 08:41:51 +02:00
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
2020-12-18 08:53:49 +01:00
|
|
|
pub struct Group {
|
|
|
|
pub pipelines: Vec<Pipeline>,
|
|
|
|
pub span: Span,
|
|
|
|
}
|
|
|
|
impl Group {
|
|
|
|
pub fn new(pipelines: Vec<Pipeline>, span: Span) -> Group {
|
|
|
|
Group { pipelines, span }
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn basic() -> Group {
|
|
|
|
Group {
|
|
|
|
pipelines: vec![],
|
|
|
|
span: Span::unknown(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn push(&mut self, pipeline: Pipeline) {
|
|
|
|
self.pipelines.push(pipeline);
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.pipelines.iter().any(|cc| cc.has_it_usage())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
|
|
|
pub struct CapturedBlock {
|
|
|
|
pub block: Block,
|
|
|
|
pub captured: Dictionary,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl CapturedBlock {
|
|
|
|
pub fn new(block: Block, captured: Dictionary) -> Self {
|
|
|
|
Self { block, captured }
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A parsed block: its signature, the groups of pipelines in its body, its
/// local definitions, and the source span it covers.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct Block {
    /// The block's parameter signature.
    pub params: Signature,
    /// The groups making up the body, in order.
    pub block: Vec<Group>,
    /// Definitions local to this block, keyed by name.
    pub definitions: IndexMap<String, Block>,
    /// The source span of the whole block.
    pub span: Span,
}
|
|
|
|
|
|
|
|
impl Block {
|
2020-12-18 08:53:49 +01:00
|
|
|
pub fn new(
|
|
|
|
params: Signature,
|
|
|
|
block: Vec<Group>,
|
|
|
|
definitions: IndexMap<String, Block>,
|
|
|
|
span: Span,
|
|
|
|
) -> Block {
|
|
|
|
Block {
|
2020-10-27 08:37:35 +01:00
|
|
|
params,
|
|
|
|
block,
|
2020-12-18 08:53:49 +01:00
|
|
|
definitions,
|
2020-10-27 08:37:35 +01:00
|
|
|
span,
|
2020-12-18 08:53:49 +01:00
|
|
|
}
|
|
|
|
}
|
2020-10-27 19:47:11 +01:00
|
|
|
|
2020-12-18 08:53:49 +01:00
|
|
|
pub fn basic() -> Block {
|
|
|
|
Block {
|
|
|
|
params: Signature::new("<basic>"),
|
|
|
|
block: vec![],
|
|
|
|
definitions: IndexMap::new(),
|
|
|
|
span: Span::unknown(),
|
|
|
|
}
|
2020-04-20 08:41:51 +02:00
|
|
|
}
|
|
|
|
|
2020-12-18 08:53:49 +01:00
|
|
|
pub fn push(&mut self, group: Group) {
|
|
|
|
self.block.push(group);
|
2020-10-27 19:47:11 +01:00
|
|
|
self.infer_params();
|
2020-04-20 08:41:51 +02:00
|
|
|
}
|
2020-04-27 04:04:54 +02:00
|
|
|
|
2020-08-07 06:53:37 +02:00
|
|
|
pub fn set_redirect(&mut self, external_redirection: ExternalRedirection) {
|
2020-12-18 08:53:49 +01:00
|
|
|
if let Some(group) = self.block.last_mut() {
|
|
|
|
if let Some(pipeline) = group.pipelines.last_mut() {
|
2021-02-12 11:13:14 +01:00
|
|
|
if let Some(ClassifiedCommand::Internal(internal)) = pipeline.list.last_mut() {
|
|
|
|
internal.args.external_redirection = external_redirection;
|
2020-06-27 23:04:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.block.iter().any(|x| x.has_it_usage())
|
|
|
|
}
|
|
|
|
|
2020-10-27 19:47:11 +01:00
|
|
|
pub fn infer_params(&mut self) {
|
2020-12-18 08:53:49 +01:00
|
|
|
// FIXME: re-enable inference later
|
|
|
|
if self.params.positional.is_empty() && self.has_it_usage() {
|
|
|
|
self.params.positional = vec![(
|
|
|
|
PositionalType::Mandatory("$it".to_string(), SyntaxShape::Any),
|
|
|
|
"implied $it".to_string(),
|
|
|
|
)];
|
2020-10-27 08:37:35 +01:00
|
|
|
}
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
let mut known_variables = known_variables.clone();
|
|
|
|
let positional_params: Vec<_> = self
|
|
|
|
.params
|
|
|
|
.positional
|
|
|
|
.iter()
|
|
|
|
.map(|(_, name)| name.clone())
|
|
|
|
.collect();
|
|
|
|
known_variables.extend_from_slice(&positional_params);
|
|
|
|
|
|
|
|
let mut free_variables = vec![];
|
|
|
|
for group in &self.block {
|
|
|
|
for pipeline in &group.pipelines {
|
|
|
|
for elem in &pipeline.list {
|
|
|
|
free_variables
|
|
|
|
.extend_from_slice(&elem.get_free_variables(&mut known_variables));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
free_variables
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[allow(clippy::derive_hash_xor_eq)]
|
|
|
|
impl Hash for Block {
|
|
|
|
fn hash<H: Hasher>(&self, state: &mut H) {
|
|
|
|
let mut entries = self.definitions.clone();
|
|
|
|
entries.sort_keys();
|
|
|
|
|
|
|
|
// FIXME: this is incomplete
|
|
|
|
entries.keys().collect::<Vec<&String>>().hash(state);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PartialOrd for Block {
|
|
|
|
/// Compare two dictionaries for sort ordering
|
|
|
|
fn partial_cmp(&self, other: &Block) -> Option<Ordering> {
|
|
|
|
let this: Vec<&String> = self.definitions.keys().collect();
|
|
|
|
let that: Vec<&String> = other.definitions.keys().collect();
|
|
|
|
|
|
|
|
// FIXME: this is incomplete
|
|
|
|
this.partial_cmp(&that)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Ord for Block {
|
|
|
|
/// Compare two dictionaries for ordering
|
|
|
|
fn cmp(&self, other: &Block) -> Ordering {
|
|
|
|
let this: Vec<&String> = self.definitions.keys().collect();
|
|
|
|
let that: Vec<&String> = other.definitions.keys().collect();
|
|
|
|
|
|
|
|
// FIXME: this is incomplete
|
|
|
|
this.cmp(&that)
|
|
|
|
}
|
2020-04-20 08:41:51 +02:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// An external command whose name and arguments are plain strings.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub struct ExternalStringCommand {
    /// The command name, with its source span.
    pub name: Spanned<String>,
    /// The arguments, each with its source span.
    pub args: Vec<Spanned<String>>,
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-20 23:45:11 +02:00
|
|
|
impl ExternalArgs {
    /// Iterate over the argument expressions in order.
    pub fn iter(&self) -> impl Iterator<Item = &SpannedExpression> {
        self.list.iter()
    }
}
|
|
|
|
|
2020-04-20 23:45:11 +02:00
|
|
|
impl std::ops::Deref for ExternalArgs {
|
|
|
|
type Target = [SpannedExpression];
|
2020-04-13 09:59:57 +02:00
|
|
|
|
2020-04-20 23:45:11 +02:00
|
|
|
fn deref(&self) -> &[SpannedExpression] {
|
|
|
|
&self.list
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The argument list of an external command, plus its source span.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct ExternalArgs {
    /// The argument expressions, in order.
    pub list: Vec<SpannedExpression>,
    /// The source span covering all arguments.
    pub span: Span,
}
|
|
|
|
|
|
|
|
/// A call to an external (non-built-in) command.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct ExternalCommand {
    /// The external program's name.
    pub name: String,

    /// The tag (span + anchor) of the command name.
    pub name_tag: Tag,
    /// The arguments passed to the program.
    pub args: ExternalArgs,
}
|
|
|
|
|
|
|
|
impl ExternalCommand {
|
2020-10-27 08:37:35 +01:00
|
|
|
pub fn has_it_usage(&self) -> bool {
|
2020-04-20 23:45:11 +02:00
|
|
|
self.args.iter().any(|arg| match arg {
|
|
|
|
SpannedExpression {
|
|
|
|
expr: Expression::Path(path),
|
|
|
|
..
|
2020-04-23 22:00:49 +02:00
|
|
|
} => {
|
|
|
|
let Path { head, .. } = &**path;
|
2020-10-26 07:55:52 +01:00
|
|
|
matches!(head, SpannedExpression{expr: Expression::Variable(x, ..), ..} if x == "$it")
|
2020-04-23 22:00:49 +02:00
|
|
|
}
|
2020-04-20 23:45:11 +02:00
|
|
|
_ => false,
|
|
|
|
})
|
2020-04-13 09:59:57 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl HasSpan for ExternalCommand {
|
|
|
|
fn span(&self) -> Span {
|
|
|
|
self.name_tag.span.until(self.args.span)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// Units that can suffix a numeric literal: file sizes and durations.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Copy, Deserialize, Serialize)]
pub enum Unit {
    // Filesize units: metric (powers of 1000)
    Byte,
    Kilobyte,
    Megabyte,
    Gigabyte,
    Terabyte,
    Petabyte,

    // Filesize units: ISO/IEC 80000 (powers of 1024)
    Kibibyte,
    Mebibyte,
    Gibibyte,

    // Duration units
    Nanosecond,
    Microsecond,
    Millisecond,
    Second,
    Minute,
    Hour,
    Day,
    Week,
    Month,
    Year,
}
|
2019-08-16 00:18:18 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A single member of a column path, as parsed from source.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Member {
    /// A quoted string member; spans of the quoted text and its contents.
    String(/* outer */ Span, /* inner */ Span),
    /// An integer member (e.g. a row index) with its span.
    Int(BigInt, Span),
    /// A bare (unquoted) member with its span.
    Bare(Spanned<String>),
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl Member {
|
|
|
|
pub fn to_path_member(&self) -> PathMember {
|
|
|
|
match self {
|
|
|
|
//Member::String(outer, inner) => PathMember::string(inner.slice(source), *outer),
|
|
|
|
Member::Int(int, span) => PathMember::int(int.clone(), *span),
|
|
|
|
Member::Bare(spanned_string) => {
|
|
|
|
PathMember::string(spanned_string.item.clone(), spanned_string.span)
|
|
|
|
}
|
|
|
|
_ => unimplemented!("Need to finish to_path_member"),
|
|
|
|
}
|
|
|
|
}
|
2019-12-04 22:14:52 +01:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Member {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match self {
|
2021-01-29 14:43:35 +01:00
|
|
|
Member::String(outer, _) => DbgDocBldr::value(outer.slice(source)),
|
|
|
|
Member::Int(int, _) => DbgDocBldr::value(format!("{}", int)),
|
|
|
|
Member::Bare(span) => DbgDocBldr::value(span.span.slice(source)),
|
2019-12-04 22:14:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl HasSpan for Member {
|
2019-12-04 22:14:52 +01:00
|
|
|
fn span(&self) -> Span {
|
2020-04-06 09:16:14 +02:00
|
|
|
match self {
|
|
|
|
Member::String(outer, ..) => *outer,
|
|
|
|
Member::Int(_, int) => *int,
|
|
|
|
Member::Bare(name) => name.span,
|
|
|
|
}
|
2019-12-04 22:14:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A numeric literal: arbitrary-precision integer or decimal.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Number {
    /// An arbitrary-precision integer.
    Int(BigInt),
    /// An arbitrary-precision decimal.
    Decimal(BigDecimal),
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebug for Number {
|
|
|
|
fn pretty(&self) -> DebugDocBuilder {
|
|
|
|
match self {
|
2021-01-29 14:43:35 +01:00
|
|
|
Number::Int(int) => DbgDocBldr::primitive(int),
|
|
|
|
Number::Decimal(decimal) => DbgDocBldr::primitive(decimal),
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
// Implement `From<$ty> for Number` (by value and by reference) for every
// listed primitive integer type, widening through `BigInt`.
macro_rules! primitive_int {
    ($($ty:ty)*) => {
        $(
            impl From<$ty> for Number {
                fn from(int: $ty) -> Number {
                    Number::Int(BigInt::zero() + int)
                }
            }

            impl From<&$ty> for Number {
                fn from(int: &$ty) -> Number {
                    Number::Int(BigInt::zero() + *int)
                }
            }
        )*
    }
}

primitive_int!(i8 u8 i16 u16 i32 u32 i64 u64 i128 u128);
|
|
|
|
|
|
|
|
// Implement `From<$ty> for Number` (by value and by reference) for the
// listed float types, using the named `BigDecimal` constructor
// (e.g. `f32 -> from_f32`).
macro_rules! primitive_decimal {
    ($($ty:tt -> $from:tt),*) => {
        $(
            impl From<$ty> for Number {
                fn from(decimal: $ty) -> Number {
                    if let Some(num) = BigDecimal::$from(decimal) {
                        Number::Decimal(num)
                    } else {
                        unreachable!("Internal error: BigDecimal 'from' failed")
                    }
                }
            }

            impl From<&$ty> for Number {
                fn from(decimal: &$ty) -> Number {
                    if let Some(num) = BigDecimal::$from(*decimal) {
                        Number::Decimal(num)
                    } else {
                        unreachable!("Internal error: BigDecimal 'from' failed")
                    }
                }
            }
        )*
    }
}

primitive_decimal!(f32 -> from_f32, f64 -> from_f64);
|
2019-09-10 17:31:21 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl std::ops::Mul for Number {
|
|
|
|
type Output = Number;
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
fn mul(self, other: Number) -> Number {
|
|
|
|
match (self, other) {
|
|
|
|
(Number::Int(a), Number::Int(b)) => Number::Int(a * b),
|
|
|
|
(Number::Int(a), Number::Decimal(b)) => Number::Decimal(BigDecimal::from(a) * b),
|
|
|
|
(Number::Decimal(a), Number::Int(b)) => Number::Decimal(a * BigDecimal::from(b)),
|
|
|
|
(Number::Decimal(a), Number::Decimal(b)) => Number::Decimal(a * b),
|
|
|
|
}
|
|
|
|
}
|
2019-06-22 03:36:57 +02:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
// For literals
|
|
|
|
impl std::ops::Mul<u32> for Number {
|
|
|
|
type Output = Number;
|
|
|
|
|
|
|
|
fn mul(self, other: u32) -> Number {
|
2019-06-30 08:14:40 +02:00
|
|
|
match self {
|
2020-04-06 09:16:14 +02:00
|
|
|
Number::Int(left) => Number::Int(left * (other as i64)),
|
|
|
|
Number::Decimal(left) => Number::Decimal(left * BigDecimal::from(other)),
|
2019-06-30 08:14:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-10 19:48:11 +02:00
|
|
|
impl ToBigInt for Number {
    /// Convert to a `BigInt`, truncating any decimal part.
    fn to_bigint(&self) -> Option<BigInt> {
        match self {
            Number::Int(int) => Some(int.clone()),
            // The BigDecimal to BigInt conversion always returns Some().
            // FIXME: This conversion might not be what we want; it just
            // removes the scale.
            Number::Decimal(decimal) => decimal.to_bigint(),
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebug for Unit {
|
|
|
|
fn pretty(&self) -> DebugDocBuilder {
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::keyword(self.as_str())
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
|
2020-05-30 15:25:39 +02:00
|
|
|
pub fn convert_number_to_u64(number: &Number) -> u64 {
|
2020-04-06 09:16:14 +02:00
|
|
|
match number {
|
|
|
|
Number::Int(big_int) => {
|
|
|
|
if let Some(x) = big_int.to_u64() {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
unreachable!("Internal error: convert_number_to_u64 given incompatible number")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Number::Decimal(big_decimal) => {
|
|
|
|
if let Some(x) = big_decimal.to_u64() {
|
|
|
|
x
|
|
|
|
} else {
|
|
|
|
unreachable!("Internal error: convert_number_to_u64 given incompatible number")
|
|
|
|
}
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl Unit {
|
|
|
|
pub fn as_str(self) -> &'static str {
|
|
|
|
match self {
|
|
|
|
Unit::Byte => "B",
|
|
|
|
Unit::Kilobyte => "KB",
|
|
|
|
Unit::Megabyte => "MB",
|
|
|
|
Unit::Gigabyte => "GB",
|
|
|
|
Unit::Terabyte => "TB",
|
|
|
|
Unit::Petabyte => "PB",
|
2021-02-10 03:31:12 +01:00
|
|
|
Unit::Kibibyte => "KiB",
|
|
|
|
Unit::Mebibyte => "MiB",
|
|
|
|
Unit::Gibibyte => "GiB",
|
2020-07-10 19:48:11 +02:00
|
|
|
Unit::Nanosecond => "ns",
|
|
|
|
Unit::Microsecond => "us",
|
|
|
|
Unit::Millisecond => "ms",
|
2020-08-15 21:03:28 +02:00
|
|
|
Unit::Second => "sec",
|
|
|
|
Unit::Minute => "min",
|
|
|
|
Unit::Hour => "hr",
|
|
|
|
Unit::Day => "day",
|
|
|
|
Unit::Week => "wk",
|
|
|
|
Unit::Month => "mon",
|
|
|
|
Unit::Year => "yr",
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
pub fn compute(self, size: &Number) -> UntaggedValue {
|
|
|
|
let size = size.clone();
|
|
|
|
|
|
|
|
match self {
|
2020-07-11 04:17:37 +02:00
|
|
|
Unit::Byte => filesize(convert_number_to_u64(&size)),
|
2021-02-10 03:31:12 +01:00
|
|
|
Unit::Kilobyte => filesize(convert_number_to_u64(&size) * 1000),
|
|
|
|
Unit::Megabyte => filesize(convert_number_to_u64(&size) * 1000 * 1000),
|
|
|
|
Unit::Gigabyte => filesize(convert_number_to_u64(&size) * 1000 * 1000 * 1000),
|
|
|
|
Unit::Terabyte => filesize(convert_number_to_u64(&size) * 1000 * 1000 * 1000 * 1000),
|
2020-04-18 03:50:58 +02:00
|
|
|
Unit::Petabyte => {
|
2021-02-10 03:31:12 +01:00
|
|
|
filesize(convert_number_to_u64(&size) * 1000 * 1000 * 1000 * 1000 * 1000)
|
2020-04-18 03:50:58 +02:00
|
|
|
}
|
2021-02-10 03:31:12 +01:00
|
|
|
|
|
|
|
Unit::Kibibyte => filesize(convert_number_to_u64(&size) * 1024),
|
|
|
|
Unit::Mebibyte => filesize(convert_number_to_u64(&size) * 1024 * 1024),
|
|
|
|
Unit::Gibibyte => filesize(convert_number_to_u64(&size) * 1024 * 1024 * 1024),
|
|
|
|
|
2020-07-10 19:48:11 +02:00
|
|
|
Unit::Nanosecond => duration(size.to_bigint().expect("Conversion should never fail.")),
|
|
|
|
Unit::Microsecond => {
|
|
|
|
duration(size.to_bigint().expect("Conversion should never fail.") * 1000)
|
|
|
|
}
|
|
|
|
Unit::Millisecond => {
|
|
|
|
duration(size.to_bigint().expect("Conversion should never fail.") * 1000 * 1000)
|
|
|
|
}
|
|
|
|
Unit::Second => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.") * 1000 * 1000 * 1000,
|
|
|
|
),
|
|
|
|
Unit::Minute => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.") * 60 * 1000 * 1000 * 1000,
|
|
|
|
),
|
|
|
|
Unit::Hour => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
Unit::Day => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
Unit::Week => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 7
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
// FIXME: Number of days per month should not always be 30.
|
|
|
|
Unit::Month => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 30
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
|
|
|
// FIXME: Number of days per year should not be 365.
|
|
|
|
Unit::Year => duration(
|
|
|
|
size.to_bigint().expect("Conversion should never fail.")
|
|
|
|
* 365
|
|
|
|
* 24
|
|
|
|
* 60
|
|
|
|
* 60
|
|
|
|
* 1000
|
|
|
|
* 1000
|
|
|
|
* 1000,
|
|
|
|
),
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
}
|
|
|
|
|
2021-01-29 23:35:18 +01:00
|
|
|
pub fn filesize(size_in_bytes: impl Into<BigInt>) -> UntaggedValue {
|
|
|
|
UntaggedValue::Primitive(Primitive::Filesize(size_in_bytes.into()))
|
2019-11-21 15:33:14 +01:00
|
|
|
}
|
|
|
|
|
2020-07-10 19:48:11 +02:00
|
|
|
/// Wrap a nanosecond count in an untagged `Duration` primitive.
pub fn duration(nanos: BigInt) -> UntaggedValue {
    UntaggedValue::Primitive(Primitive::Duration(nanos))
}
|
|
|
|
|
|
|
|
/// An expression paired with the source span it was parsed from.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub struct SpannedExpression {
    /// The expression itself.
    pub expr: Expression,
    /// Its source span.
    pub span: Span,
}
|
|
|
|
|
2020-02-09 03:24:33 +01:00
|
|
|
impl SpannedExpression {
|
|
|
|
pub fn new(expr: Expression, span: Span) -> SpannedExpression {
|
|
|
|
SpannedExpression { expr, span }
|
|
|
|
}
|
2020-04-18 03:50:58 +02:00
|
|
|
|
|
|
|
pub fn precedence(&self) -> usize {
|
|
|
|
match self.expr {
|
|
|
|
Expression::Literal(Literal::Operator(operator)) => {
|
|
|
|
// Higher precedence binds tighter
|
|
|
|
|
|
|
|
match operator {
|
2021-01-29 14:44:02 +01:00
|
|
|
Operator::Pow => 100,
|
|
|
|
Operator::Multiply | Operator::Divide | Operator::Modulo => 95,
|
2020-04-18 03:50:58 +02:00
|
|
|
Operator::Plus | Operator::Minus => 90,
|
|
|
|
Operator::NotContains
|
|
|
|
| Operator::Contains
|
|
|
|
| Operator::LessThan
|
|
|
|
| Operator::LessThanOrEqual
|
|
|
|
| Operator::GreaterThan
|
|
|
|
| Operator::GreaterThanOrEqual
|
|
|
|
| Operator::Equal
|
|
|
|
| Operator::NotEqual
|
2020-04-26 07:32:17 +02:00
|
|
|
| Operator::In
|
|
|
|
| Operator::NotIn => 80,
|
2020-04-18 03:50:58 +02:00
|
|
|
Operator::And => 50,
|
|
|
|
Operator::Or => 40, // TODO: should we have And and Or be different precedence?
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => 0,
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.expr.has_it_usage()
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
self.expr.get_free_variables(known_variables)
|
|
|
|
}
|
2020-02-09 03:24:33 +01:00
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl std::ops::Deref for SpannedExpression {
|
|
|
|
type Target = Expression;
|
2019-11-21 15:33:14 +01:00
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
fn deref(&self) -> &Expression {
|
2019-11-21 15:33:14 +01:00
|
|
|
&self.expr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl HasSpan for SpannedExpression {
    /// The span of source text this expression covers.
    fn span(&self) -> Span {
        self.span
    }
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl ShellTypeName for SpannedExpression {
|
|
|
|
fn type_name(&self) -> &'static str {
|
|
|
|
self.expr.type_name()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for SpannedExpression {
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.refined_pretty_debug(refine, source),
|
|
|
|
PrettyDebugRefineKind::WithContext => match &self.expr {
|
|
|
|
Expression::Literal(literal) => literal
|
|
|
|
.clone()
|
|
|
|
.into_spanned(self.span)
|
|
|
|
.refined_pretty_debug(refine, source),
|
|
|
|
Expression::ExternalWord => {
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::delimit("e\"", DbgDocBldr::primitive(self.span.slice(source)), "\"")
|
|
|
|
.group()
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
}
|
|
|
|
Expression::Synthetic(s) => match s {
|
2021-01-29 14:43:35 +01:00
|
|
|
Synthetic::String(_) => DbgDocBldr::delimit(
|
|
|
|
"s\"",
|
|
|
|
DbgDocBldr::primitive(self.span.slice(source)),
|
|
|
|
"\"",
|
|
|
|
)
|
|
|
|
.group(),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
},
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Variable(_, _) => DbgDocBldr::keyword(self.span.slice(source)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Binary(binary) => binary.pretty_debug(source),
|
|
|
|
Expression::Range(range) => range.pretty_debug(source),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Block(_) => DbgDocBldr::opaque("block"),
|
|
|
|
Expression::Invocation(_) => DbgDocBldr::opaque("invocation"),
|
|
|
|
Expression::Garbage => DbgDocBldr::opaque("garbage"),
|
|
|
|
Expression::List(list) => DbgDocBldr::delimit(
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
"[",
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::intersperse(
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
list.iter()
|
|
|
|
.map(|item| item.refined_pretty_debug(refine, source)),
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::space(),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Table(_headers, cells) => DbgDocBldr::delimit(
|
2020-08-30 06:55:33 +02:00
|
|
|
"[",
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::intersperse(
|
2020-08-30 06:55:33 +02:00
|
|
|
cells
|
|
|
|
.iter()
|
|
|
|
.map(|row| {
|
|
|
|
row.iter()
|
|
|
|
.map(|item| item.refined_pretty_debug(refine, source))
|
|
|
|
})
|
|
|
|
.flatten(),
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::space(),
|
2020-08-30 06:55:33 +02:00
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Path(path) => path.pretty_debug(source),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::FilePath(path) => {
|
|
|
|
DbgDocBldr::typed("path", DbgDocBldr::primitive(path.display()))
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::ExternalCommand(external) => {
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::keyword("^") + DbgDocBldr::keyword(external.name.span.slice(source))
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
}
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Command => DbgDocBldr::keyword(self.span.slice(source)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Boolean(boolean) => match boolean {
|
2021-01-29 14:43:35 +01:00
|
|
|
true => DbgDocBldr::primitive("$yes"),
|
|
|
|
false => DbgDocBldr::primitive("$no"),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-21 15:33:14 +01:00
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match &self.expr {
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Literal(literal) => {
|
|
|
|
literal.clone().into_spanned(self.span).pretty_debug(source)
|
|
|
|
}
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::ExternalWord => DbgDocBldr::typed(
|
|
|
|
"external word",
|
|
|
|
DbgDocBldr::primitive(self.span.slice(source)),
|
|
|
|
),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Synthetic(s) => match s {
|
2021-01-29 14:43:35 +01:00
|
|
|
Synthetic::String(s) => {
|
|
|
|
DbgDocBldr::typed("synthetic", DbgDocBldr::primitive(format!("{:?}", s)))
|
|
|
|
}
|
2019-11-21 15:33:14 +01:00
|
|
|
},
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Variable(_, _) => DbgDocBldr::keyword(self.span.slice(source)),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Binary(binary) => binary.pretty_debug(source),
|
|
|
|
Expression::Range(range) => range.pretty_debug(source),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Block(_) => DbgDocBldr::opaque("block"),
|
|
|
|
Expression::Invocation(_) => DbgDocBldr::opaque("invocation"),
|
|
|
|
Expression::Garbage => DbgDocBldr::opaque("garbage"),
|
|
|
|
Expression::List(list) => DbgDocBldr::delimit(
|
2019-11-21 15:33:14 +01:00
|
|
|
"[",
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::intersperse(
|
2019-11-21 15:33:14 +01:00
|
|
|
list.iter().map(|item| item.pretty_debug(source)),
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::space(),
|
2019-11-21 15:33:14 +01:00
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Table(_headers, cells) => DbgDocBldr::delimit(
|
2020-08-30 06:55:33 +02:00
|
|
|
"[",
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::intersperse(
|
2020-08-30 06:55:33 +02:00
|
|
|
cells
|
|
|
|
.iter()
|
|
|
|
.map(|row| row.iter().map(|item| item.pretty_debug(source)))
|
|
|
|
.flatten(),
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::space(),
|
2020-08-30 06:55:33 +02:00
|
|
|
),
|
|
|
|
"]",
|
|
|
|
),
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Path(path) => path.pretty_debug(source),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::FilePath(path) => {
|
|
|
|
DbgDocBldr::typed("path", DbgDocBldr::primitive(path.display()))
|
|
|
|
}
|
|
|
|
Expression::ExternalCommand(external) => DbgDocBldr::typed(
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
"command",
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::keyword("^") + DbgDocBldr::primitive(external.name.span.slice(source)),
|
2019-11-21 15:33:14 +01:00
|
|
|
),
|
2021-01-29 14:43:35 +01:00
|
|
|
Expression::Command => {
|
|
|
|
DbgDocBldr::typed("command", DbgDocBldr::primitive(self.span.slice(source)))
|
|
|
|
}
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Expression::Boolean(boolean) => match boolean {
|
2021-01-29 14:43:35 +01:00
|
|
|
true => DbgDocBldr::primitive("$yes"),
|
|
|
|
false => DbgDocBldr::primitive("$no"),
|
2019-11-21 15:33:14 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A binary operator appearing in HIR expressions (see `Literal::Operator`
/// and `Binary`).
#[derive(Debug, Clone, Copy, PartialOrd, Ord, Eq, Hash, PartialEq, Deserialize, Serialize)]
pub enum Operator {
    // Comparison
    Equal,
    NotEqual,
    LessThan,
    GreaterThan,
    LessThanOrEqual,
    GreaterThanOrEqual,
    // Containment
    Contains,
    NotContains,
    // Arithmetic
    Plus,
    Minus,
    Multiply,
    Divide,
    // Membership
    In,
    NotIn,
    Modulo,
    // Boolean
    And,
    Or,
    // Exponentiation
    Pow,
}
|
2019-06-22 03:36:57 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A binary expression: left operand, operator, right operand.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize, new)]
pub struct Binary {
    /// The left-hand operand.
    pub left: SpannedExpression,
    /// The operator, kept as a full spanned expression so its span can be
    /// sliced back out of the source text (as in `Binary::pretty_debug`).
    pub op: SpannedExpression,
    /// The right-hand operand.
    pub right: SpannedExpression,
}
|
2019-11-04 16:47:03 +01:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Binary {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::delimit(
|
2020-04-06 09:16:14 +02:00
|
|
|
"<",
|
|
|
|
self.left.pretty_debug(source)
|
2021-01-29 14:43:35 +01:00
|
|
|
+ DbgDocBldr::space()
|
|
|
|
+ DbgDocBldr::keyword(self.op.span.slice(source))
|
|
|
|
+ DbgDocBldr::space()
|
2020-04-06 09:16:14 +02:00
|
|
|
+ self.right.pretty_debug(source),
|
|
|
|
">",
|
|
|
|
)
|
|
|
|
.group()
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A value synthesized internally rather than taken from a span of source
/// text — note it is rendered via `Debug`, not a source slice, in
/// `SpannedExpression::pretty_debug`.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Synthetic {
    /// A synthetic string value.
    String(String),
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl ShellTypeName for Synthetic {
|
|
|
|
fn type_name(&self) -> &'static str {
|
|
|
|
match self {
|
|
|
|
Synthetic::String(_) => "string",
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A range expression with optionally omitted endpoints.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub struct Range {
    /// The lower endpoint; `None` when omitted.
    pub left: Option<SpannedExpression>,
    /// The range operator together with its source span.
    pub operator: Spanned<RangeOperator>,
    /// The upper endpoint; `None` when omitted.
    pub right: Option<SpannedExpression>,
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for Range {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
2021-01-29 14:43:35 +01:00
|
|
|
DbgDocBldr::delimit(
|
2020-04-06 09:16:14 +02:00
|
|
|
"<",
|
2020-09-07 04:43:58 +02:00
|
|
|
(if let Some(left) = &self.left {
|
|
|
|
left.pretty_debug(source)
|
|
|
|
} else {
|
|
|
|
DebugDocBuilder::blank()
|
2021-01-29 14:43:35 +01:00
|
|
|
}) + DbgDocBldr::space()
|
|
|
|
+ DbgDocBldr::keyword(self.operator.span().slice(source))
|
|
|
|
+ DbgDocBldr::space()
|
2020-09-07 04:43:58 +02:00
|
|
|
+ (if let Some(right) = &self.right {
|
|
|
|
right.pretty_debug(source)
|
|
|
|
} else {
|
|
|
|
DebugDocBuilder::blank()
|
|
|
|
}),
|
2020-04-06 09:16:14 +02:00
|
|
|
">",
|
|
|
|
)
|
|
|
|
.group()
|
2019-06-29 10:55:42 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-13 23:53:08 +02:00
|
|
|
/// Whether a range includes or excludes its right endpoint.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum RangeOperator {
    /// The right endpoint is included.
    Inclusive,
    /// The right endpoint is excluded.
    RightExclusive,
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A literal value in the HIR.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Literal {
    /// A numeric literal.
    Number(Number),
    /// A number paired with a unit, each carrying its own span.
    Size(Spanned<Number>, Spanned<Unit>),
    /// An operator in literal position.
    Operator(Operator),
    /// A string literal.
    String(String),
    /// A glob pattern.
    GlobPattern(String),
    /// A column path: a sequence of members (e.g. field accesses).
    ColumnPath(Vec<Member>),
    /// A bare word — presumably an unquoted token; confirm against the parser.
    Bare(String),
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl Literal {
|
|
|
|
pub fn into_spanned(self, span: impl Into<Span>) -> SpannedLiteral {
|
|
|
|
SpannedLiteral {
|
2019-11-21 15:33:14 +01:00
|
|
|
literal: self,
|
|
|
|
span: span.into(),
|
|
|
|
}
|
Overhaul the expansion system
The main thrust of this (very large) commit is an overhaul of the
expansion system.
The parsing pipeline is:
- Lightly parse the source file for atoms, basic delimiters and pipeline
structure into a token tree
- Expand the token tree into a HIR (high-level intermediate
representation) based upon the baseline syntax rules for expressions
and the syntactic shape of commands.
Somewhat non-traditionally, nu doesn't have an AST at all. It goes
directly from the token tree, which doesn't represent many important
distinctions (like the difference between `hello` and `5KB`) directly
into a high-level representation that doesn't have a direct
correspondence to the source code.
At a high level, nu commands work like macros, in the sense that the
syntactic shape of the invocation of a command depends on the
definition of a command.
However, commands do not have the ability to perform unrestricted
expansions of the token tree. Instead, they describe their arguments in
terms of syntactic shapes, and the expander expands the token tree into
HIR based upon that definition.
For example, the `where` command says that it takes a block as its first
required argument, and the description of the block syntactic shape
expands the syntax `cpu > 10` into HIR that represents
`{ $it.cpu > 10 }`.
This commit overhauls that system so that the syntactic shapes are
described in terms of a few new traits (`ExpandSyntax` and
`ExpandExpression` are the primary ones) that are more composable than
the previous system.
The first big win of this new system is the addition of the `ColumnPath`
shape, which looks like `cpu."max ghz"` or `package.version`.
Previously, while a variable path could look like `$it.cpu."max ghz"`,
the tail of a variable path could not be easily reused in other
contexts. Now, that tail is its own syntactic shape, and it can be used
as part of a command's signature.
This cleans up commands like `inc`, `add` and `edit` as well as
shorthand blocks, which can now look like `| where cpu."max ghz" > 10`
2019-09-18 00:26:27 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
//, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize
/// A `Literal` paired with the source span it was parsed from.
#[derive(Debug, Clone)]
pub struct SpannedLiteral {
    /// The literal value itself.
    pub literal: Literal,
    /// The region of source code this literal covers.
    pub span: Span,
}
|
|
|
|
|
2019-11-21 15:33:14 +01:00
|
|
|
impl ShellTypeName for Literal {
|
|
|
|
fn type_name(&self) -> &'static str {
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
match &self {
|
|
|
|
Literal::Number(..) => "number",
|
|
|
|
Literal::Size(..) => "size",
|
|
|
|
Literal::String(..) => "string",
|
|
|
|
Literal::ColumnPath(..) => "column path",
|
2020-04-10 09:56:48 +02:00
|
|
|
Literal::Bare(_) => "string",
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
Literal::GlobPattern(_) => "pattern",
|
2020-04-06 09:16:14 +02:00
|
|
|
Literal::Operator(_) => "operator",
|
2019-07-24 00:22:11 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Restructure and streamline token expansion (#1123)
Restructure and streamline token expansion
The purpose of this commit is to streamline the token expansion code, by
removing aspects of the code that are no longer relevant, removing
pointless duplication, and eliminating the need to pass the same
arguments to `expand_syntax`.
The first big-picture change in this commit is that instead of a handful
of `expand_` functions, which take a TokensIterator and ExpandContext, a
smaller number of methods on the `TokensIterator` do the same job.
The second big-picture change in this commit is fully eliminating the
coloring traits, making coloring a responsibility of the base expansion
implementations. This also means that the coloring tracer is merged into
the expansion tracer, so you can follow a single expansion and see how
the expansion process produced colored tokens.
One side effect of this change is that the expander itself is marginally
more error-correcting. The error correction works by switching from
structured expansion to `BackoffColoringMode` when an unexpected token
is found, which guarantees that all spans of the source are colored, but
may not be the most optimal error recovery strategy.
That said, because `BackoffColoringMode` only extends as far as a
closing delimiter (`)`, `]`, `}`) or pipe (`|`), it does result in
fairly granular correction strategy.
The current code still produces an `Err` (plus a complete list of
colored shapes) from the parsing process if any errors are encountered,
but this could easily be addressed now that the underlying expansion is
error-correcting.
This commit also colors any spans that are syntax errors in red, and
causes the parser to include some additional information about what
tokens were expected at any given point where an error was encountered,
so that completions and hinting could be more robust in the future.
Co-authored-by: Jonathan Turner <jonathandturner@users.noreply.github.com>
Co-authored-by: Andrés N. Robalino <andres@androbtech.com>
2020-01-21 23:45:03 +01:00
|
|
|
impl PrettyDebugWithSource for SpannedLiteral {
    /// Compact rendering used when this literal is nested inside a larger
    /// pretty-printed structure; `WithContext` omits the `typed(...)` labels
    /// that `pretty_debug` adds.
    fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
        match refine {
            PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
            PrettyDebugRefineKind::WithContext => match &self.literal {
                Literal::Number(number) => number.pretty(),
                // Size renders as the number immediately followed by its unit.
                Literal::Size(number, unit) => (number.pretty() + unit.pretty()).group(),
                Literal::String(string) => DbgDocBldr::primitive(format!("{:?}", string)), //string.slice(source))),
                Literal::GlobPattern(pattern) => DbgDocBldr::primitive(pattern),
                Literal::ColumnPath(path) => {
                    DbgDocBldr::intersperse_with_source(path.iter(), DbgDocBldr::space(), source)
                }
                // Bare words are wrapped as b"..." to distinguish them from strings.
                Literal::Bare(bare) => {
                    DbgDocBldr::delimit("b\"", DbgDocBldr::primitive(bare), "\"")
                }
                Literal::Operator(operator) => DbgDocBldr::primitive(format!("{:?}", operator)),
            },
        }
    }

    /// Full rendering of the literal, labelling each value with its kind
    /// via `DbgDocBldr::typed` (e.g. `string`, `size`, `column path`).
    fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
        match &self.literal {
            Literal::Number(number) => number.pretty(),
            Literal::Size(number, unit) => {
                DbgDocBldr::typed("size", (number.pretty() + unit.pretty()).group())
            }
            Literal::String(string) => DbgDocBldr::typed(
                "string",
                DbgDocBldr::primitive(format!("{:?}", string)), //string.slice(source))),
            ),
            Literal::GlobPattern(pattern) => {
                DbgDocBldr::typed("pattern", DbgDocBldr::primitive(pattern))
            }
            Literal::ColumnPath(path) => DbgDocBldr::typed(
                "column path",
                DbgDocBldr::intersperse_with_source(path.iter(), DbgDocBldr::space(), source),
            ),
            Literal::Bare(bare) => DbgDocBldr::typed("bare", DbgDocBldr::primitive(bare)),
            Literal::Operator(operator) => {
                DbgDocBldr::typed("operator", DbgDocBldr::primitive(format!("{:?}", operator)))
            }
        }
    }
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
/// A path expression such as `$it.cpu."max ghz"`: a head expression
/// followed by a tail of member accesses.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, new, Deserialize, Serialize)]
pub struct Path {
    /// The expression the path starts from (e.g. a variable reference).
    pub head: SpannedExpression,
    /// The chain of members accessed after the head.
    pub tail: Vec<PathMember>,
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Path {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
self.head.pretty_debug(source)
|
2021-01-29 14:43:35 +01:00
|
|
|
+ DbgDocBldr::operator(".")
|
|
|
|
+ DbgDocBldr::intersperse(
|
|
|
|
self.tail.iter().map(|m| m.pretty()),
|
|
|
|
DbgDocBldr::operator("."),
|
|
|
|
)
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// The core expression type of the high-level intermediate representation.
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Hash, Deserialize, Serialize)]
pub enum Expression {
    /// A literal value: number, size, string, glob, column path, etc.
    Literal(Literal),
    /// A bare word destined for an external command.
    ExternalWord,
    /// A value synthesized by the shell rather than parsed from source.
    Synthetic(Synthetic),
    /// A variable reference, carrying the span of its name.
    Variable(String, Span),
    /// A binary operation (boxed to keep the enum small).
    Binary(Box<Binary>),
    /// A range expression (boxed to keep the enum small).
    Range(Box<Range>),
    /// A block of code.
    Block(hir::Block),
    /// A list of expressions.
    List(Vec<SpannedExpression>),
    /// A table: header cells plus rows of cells.
    Table(Vec<SpannedExpression>, Vec<Vec<SpannedExpression>>),
    /// A member-access path like `$it.cpu."max ghz"`.
    Path(Box<Path>),

    /// A filesystem path value.
    FilePath(PathBuf),
    /// An external command together with its arguments.
    ExternalCommand(ExternalStringCommand),
    /// An internal command reference (no payload; type name "command").
    Command,
    /// A command invocation subexpression (a block evaluated in place).
    Invocation(hir::Block),

    /// A boolean value.
    Boolean(bool),

    // Trying this approach out: if we let parsing always be infallible
    // we can use the same parse and just place bad token markers in the output
    // We can later throw an error if we try to process them further.
    /// Marker for source that failed to parse; processing it is an error.
    Garbage,
}
|
|
|
|
|
|
|
|
impl ShellTypeName for Expression {
    /// A short human-readable name for this expression variant.
    fn type_name(&self) -> &'static str {
        match self {
            // Literals and synthetic values report their inner type's name.
            Expression::Literal(literal) => literal.type_name(),
            Expression::Synthetic(synthetic) => synthetic.type_name(),
            Expression::Command => "command",
            Expression::ExternalWord => "external word",
            Expression::FilePath(..) => "file path",
            Expression::Variable(..) => "variable",
            Expression::List(..) => "list",
            Expression::Table(..) => "table",
            Expression::Binary(..) => "binary",
            Expression::Range(..) => "range",
            Expression::Block(..) => "block",
            Expression::Invocation(..) => "command invocation",
            Expression::Path(..) => "variable path",
            Expression::Boolean(..) => "boolean",
            Expression::ExternalCommand(..) => "external",
            Expression::Garbage => "garbage",
        }
    }
}
|
|
|
|
|
|
|
|
impl IntoSpanned for Expression {
|
|
|
|
type Output = SpannedExpression;
|
|
|
|
|
|
|
|
fn into_spanned(self, span: impl Into<Span>) -> Self::Output {
|
|
|
|
SpannedExpression {
|
|
|
|
expr: self,
|
|
|
|
span: span.into(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Expression {
|
2020-10-06 19:30:18 +02:00
|
|
|
pub fn integer(i: BigInt) -> Expression {
|
|
|
|
Expression::Literal(Literal::Number(Number::Int(i)))
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
2020-10-07 03:01:40 +02:00
|
|
|
pub fn decimal(dec: BigDecimal) -> Expression {
|
|
|
|
Expression::Literal(Literal::Number(Number::Decimal(dec)))
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn string(s: String) -> Expression {
|
|
|
|
Expression::Literal(Literal::String(s))
|
|
|
|
}
|
|
|
|
|
2020-04-18 03:50:58 +02:00
|
|
|
pub fn operator(operator: Operator) -> Expression {
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Literal(Literal::Operator(operator))
|
|
|
|
}
|
|
|
|
|
2020-09-07 04:43:58 +02:00
|
|
|
pub fn range(
|
|
|
|
left: Option<SpannedExpression>,
|
2020-09-13 23:53:08 +02:00
|
|
|
operator: Spanned<RangeOperator>,
|
2020-09-07 04:43:58 +02:00
|
|
|
right: Option<SpannedExpression>,
|
|
|
|
) -> Expression {
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Range(Box::new(Range {
|
|
|
|
left,
|
2020-09-13 23:53:08 +02:00
|
|
|
operator,
|
2020-04-06 09:16:14 +02:00
|
|
|
right,
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2021-01-08 08:30:41 +01:00
|
|
|
pub fn glob_pattern(p: String) -> Expression {
|
2020-04-06 09:16:14 +02:00
|
|
|
Expression::Literal(Literal::GlobPattern(p))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn file_path(file_path: PathBuf) -> Expression {
|
|
|
|
Expression::FilePath(file_path)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn simple_column_path(members: Vec<Member>) -> Expression {
|
|
|
|
Expression::Literal(Literal::ColumnPath(members))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn path(head: SpannedExpression, tail: Vec<impl Into<PathMember>>) -> Expression {
|
|
|
|
let tail = tail.into_iter().map(|t| t.into()).collect();
|
|
|
|
Expression::Path(Box::new(Path::new(head, tail)))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn unit(i: Spanned<i64>, unit: Spanned<Unit>) -> Expression {
|
|
|
|
Expression::Literal(Literal::Size(
|
|
|
|
Number::Int(BigInt::from(i.item)).spanned(i.span),
|
|
|
|
unit,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn variable(v: String, span: Span) -> Expression {
|
2020-10-26 07:55:52 +01:00
|
|
|
Expression::Variable(v, span)
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
2020-10-19 09:03:14 +02:00
|
|
|
|
|
|
|
pub fn boolean(b: bool) -> Expression {
|
|
|
|
Expression::Boolean(b)
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
match self {
|
|
|
|
Expression::Variable(name, _) if name == "$it" => true,
|
|
|
|
Expression::Table(headers, values) => {
|
|
|
|
headers.iter().any(|se| se.has_it_usage())
|
|
|
|
|| values.iter().any(|v| v.iter().any(|se| se.has_it_usage()))
|
|
|
|
}
|
|
|
|
Expression::List(list) => list.iter().any(|se| se.has_it_usage()),
|
|
|
|
Expression::Invocation(block) => block.has_it_usage(),
|
|
|
|
Expression::Binary(binary) => binary.left.has_it_usage() || binary.right.has_it_usage(),
|
|
|
|
Expression::Path(path) => path.head.has_it_usage(),
|
|
|
|
Expression::Range(range) => {
|
|
|
|
(if let Some(left) = &range.left {
|
|
|
|
left.has_it_usage()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}) || (if let Some(right) = &range.right {
|
|
|
|
right.has_it_usage()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
})
|
|
|
|
}
|
|
|
|
_ => false,
|
|
|
|
}
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
let mut output = vec![];
|
|
|
|
match self {
|
|
|
|
Expression::Variable(name, _) => {
|
|
|
|
if !known_variables.contains(name) {
|
|
|
|
output.push(name.clone());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Expression::Table(headers, values) => {
|
|
|
|
for header in headers {
|
|
|
|
output.extend(header.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
for row in values {
|
|
|
|
for value in row {
|
|
|
|
output.extend(value.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Expression::List(list) => {
|
|
|
|
for item in list {
|
|
|
|
output.extend(item.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
}
|
2021-01-19 19:21:11 +01:00
|
|
|
Expression::Invocation(block) | Expression::Block(block) => {
|
2020-12-18 08:53:49 +01:00
|
|
|
output.extend(block.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
Expression::Binary(binary) => {
|
|
|
|
output.extend(binary.left.get_free_variables(known_variables));
|
|
|
|
output.extend(binary.right.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
Expression::Path(path) => {
|
|
|
|
output.extend(path.head.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
Expression::Range(range) => {
|
|
|
|
if let Some(left) = &range.left {
|
|
|
|
output.extend(left.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
if let Some(right) = &range.right {
|
|
|
|
output.extend(right.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
output
|
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// The argument bound to a named flag in a parsed call.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub enum NamedValue {
    /// A switch flag that was not supplied.
    AbsentSwitch,
    /// A switch flag that was supplied; the span points at the flag token.
    PresentSwitch(Span),
    /// A value-taking flag that was not supplied.
    AbsentValue,
    /// A value-taking flag: the flag's span and its argument expression.
    Value(Span, Box<SpannedExpression>),
}
|
|
|
|
|
2020-10-27 08:37:35 +01:00
|
|
|
impl NamedValue {
|
|
|
|
fn has_it_usage(&self) -> bool {
|
|
|
|
if let NamedValue::Value(_, se) = self {
|
|
|
|
se.has_it_usage()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
}
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
if let NamedValue::Value(_, se) = self {
|
|
|
|
se.get_free_variables(known_variables)
|
|
|
|
} else {
|
|
|
|
vec![]
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl PrettyDebugWithSource for NamedValue {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
match self {
|
2021-01-29 14:43:35 +01:00
|
|
|
NamedValue::AbsentSwitch => {
|
|
|
|
DbgDocBldr::typed("switch", DbgDocBldr::description("absent"))
|
|
|
|
}
|
|
|
|
NamedValue::PresentSwitch(_) => {
|
|
|
|
DbgDocBldr::typed("switch", DbgDocBldr::description("present"))
|
|
|
|
}
|
|
|
|
NamedValue::AbsentValue => DbgDocBldr::description("absent"),
|
2020-04-06 09:16:14 +02:00
|
|
|
NamedValue::Value(_, value) => value.pretty_debug(source),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
|
|
|
|
match refine {
|
|
|
|
PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
|
|
|
|
PrettyDebugRefineKind::WithContext => match self {
|
2021-01-29 14:43:35 +01:00
|
|
|
NamedValue::AbsentSwitch => DbgDocBldr::value("absent"),
|
|
|
|
NamedValue::PresentSwitch(_) => DbgDocBldr::value("present"),
|
|
|
|
NamedValue::AbsentValue => DbgDocBldr::value("absent"),
|
2020-04-06 09:16:14 +02:00
|
|
|
NamedValue::Value(_, value) => value.refined_pretty_debug(refine, source),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-07 06:53:37 +02:00
|
|
|
/// Which of an external command's output streams are redirected into the
/// pipeline.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub enum ExternalRedirection {
    /// No redirection.
    None,
    /// Redirect stdout only.
    Stdout,
    /// Redirect stderr only.
    Stderr,
    /// Redirect both stdout and stderr.
    StdoutAndStderr,
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// A parsed command invocation: the command head plus its arguments.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
pub struct Call {
    /// The command being invoked.
    pub head: Box<SpannedExpression>,
    /// Positional arguments, if any were supplied.
    pub positional: Option<Vec<SpannedExpression>>,
    /// Named (flag) arguments, if any were supplied.
    pub named: Option<NamedArguments>,
    /// Source span covering the whole call.
    pub span: Span,
    /// How external-command output is redirected for this call.
    pub external_redirection: ExternalRedirection,
}
|
|
|
|
|
|
|
|
impl Call {
|
|
|
|
pub fn switch_preset(&self, switch: &str) -> bool {
|
|
|
|
self.named
|
|
|
|
.as_ref()
|
|
|
|
.map(|n| n.switch_present(switch))
|
|
|
|
.unwrap_or(false)
|
|
|
|
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
pub fn set_initial_flags(&mut self, signature: &crate::Signature) {
|
2020-04-06 09:16:14 +02:00
|
|
|
for (named, value) in signature.named.iter() {
|
|
|
|
if self.named.is_none() {
|
|
|
|
self.named = Some(NamedArguments::new());
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(ref mut args) = self.named {
|
|
|
|
match value.0 {
|
2020-04-13 09:59:57 +02:00
|
|
|
crate::NamedType::Switch(_) => args.insert_switch(named, None),
|
2020-04-06 09:16:14 +02:00
|
|
|
_ => args.insert_optional(named, Span::new(0, 0), None),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.head.has_it_usage()
|
|
|
|
|| (if let Some(pos) = &self.positional {
|
|
|
|
pos.iter().any(|x| x.has_it_usage())
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
})
|
|
|
|
|| (if let Some(named) = &self.named {
|
|
|
|
named.has_it_usage()
|
|
|
|
} else {
|
|
|
|
false
|
|
|
|
})
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
let mut free_variables = vec![];
|
|
|
|
|
|
|
|
free_variables.extend(self.head.get_free_variables(known_variables));
|
|
|
|
if let Some(pos) = &self.positional {
|
|
|
|
for pos in pos {
|
|
|
|
free_variables.extend(pos.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Some(named) = &self.named {
|
|
|
|
free_variables.extend(named.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
|
|
|
|
free_variables
|
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Call {
    /// Context-aware rendering: `head positional… named…`, with each optional
    /// group preceded by a space and omitted entirely when absent.
    fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
        match refine {
            PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
            PrettyDebugRefineKind::WithContext => {
                self.head
                    .refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
                    // Positional arguments, space-separated, only if present.
                    + DbgDocBldr::preceded_option(
                        Some(DbgDocBldr::space()),
                        self.positional.as_ref().map(|pos| {
                            DbgDocBldr::intersperse(
                                pos.iter().map(|expr| {
                                    expr.refined_pretty_debug(
                                        PrettyDebugRefineKind::WithContext,
                                        source,
                                    )
                                }),
                                DbgDocBldr::space(),
                            )
                        }),
                    )
                    // Named arguments, only if present.
                    + DbgDocBldr::preceded_option(
                        Some(DbgDocBldr::space()),
                        self.named.as_ref().map(|named| {
                            named.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
                        }),
                    )
            }
        }
    }

    /// Standalone rendering: the contextual form wrapped in a `call` tag.
    fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
        DbgDocBldr::typed(
            "call",
            self.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source),
        )
    }
}
|
|
|
|
|
|
|
|
impl Call {
|
|
|
|
pub fn new(head: Box<SpannedExpression>, span: Span) -> Call {
|
|
|
|
Call {
|
|
|
|
head,
|
|
|
|
positional: None,
|
|
|
|
named: None,
|
|
|
|
span,
|
2020-08-07 06:53:37 +02:00
|
|
|
external_redirection: ExternalRedirection::Stdout,
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A paired delimiter kind, used by `FlatShape` to classify open/close tokens.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Delimiter {
    /// `(` / `)`
    Paren,
    /// `{` / `}`
    Brace,
    /// `[` / `]`
    Square,
}
|
|
|
|
|
|
|
|
/// A flat, token-level classification of source text, used for syntax
/// coloring (see `Flag::color`, which maps flags to these shapes).
#[derive(Debug, Copy, Clone)]
pub enum FlatShape {
    OpenDelimiter(Delimiter),
    CloseDelimiter(Delimiter),
    Type,
    Identifier,
    /// The special `$it` variable.
    ItVariable,
    Variable,
    Operator,
    Dot,
    /// The `..` range operator.
    DotDot,
    /// The `..<` right-exclusive range operator.
    DotDotLeftAngleBracket,
    InternalCommand,
    ExternalCommand,
    ExternalWord,
    BareMember,
    StringMember,
    String,
    Path,
    Word,
    Keyword,
    Pipe,
    GlobPattern,
    /// A longhand flag (`--name`).
    Flag,
    /// A shorthand flag (`-n`).
    ShorthandFlag,
    Int,
    Decimal,
    Garbage,
    Whitespace,
    Separator,
    Comment,
    /// A sized literal split into its number and unit spans.
    Size { number: Span, unit: Span },
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
/// The named (flag) arguments of a call, keyed by flag name.
/// Backed by an `IndexMap`, so insertion order is preserved.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct NamedArguments {
    pub named: IndexMap<String, NamedValue>,
}
|
|
|
|
|
2020-04-13 09:59:57 +02:00
|
|
|
#[allow(clippy::derive_hash_xor_eq)]
|
|
|
|
impl Hash for NamedArguments {
|
|
|
|
/// Create the hash function to allow the Hash trait for dictionaries
|
|
|
|
fn hash<H: Hasher>(&self, state: &mut H) {
|
|
|
|
let mut entries = self.named.clone();
|
|
|
|
entries.sort_keys();
|
|
|
|
entries.keys().collect::<Vec<&String>>().hash(state);
|
|
|
|
entries.values().collect::<Vec<&NamedValue>>().hash(state);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PartialOrd for NamedArguments {
|
|
|
|
/// Compare two dictionaries for sort ordering
|
|
|
|
fn partial_cmp(&self, other: &NamedArguments) -> Option<Ordering> {
|
|
|
|
let this: Vec<&String> = self.named.keys().collect();
|
|
|
|
let that: Vec<&String> = other.named.keys().collect();
|
|
|
|
|
|
|
|
if this != that {
|
|
|
|
return this.partial_cmp(&that);
|
|
|
|
}
|
|
|
|
|
|
|
|
let this: Vec<&NamedValue> = self.named.values().collect();
|
2020-12-18 08:53:49 +01:00
|
|
|
let that: Vec<&NamedValue> = other.named.values().collect();
|
2020-04-13 09:59:57 +02:00
|
|
|
|
|
|
|
this.partial_cmp(&that)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Ord for NamedArguments {
|
|
|
|
/// Compare two dictionaries for ordering
|
|
|
|
fn cmp(&self, other: &NamedArguments) -> Ordering {
|
|
|
|
let this: Vec<&String> = self.named.keys().collect();
|
|
|
|
let that: Vec<&String> = other.named.keys().collect();
|
|
|
|
|
|
|
|
if this != that {
|
|
|
|
return this.cmp(&that);
|
|
|
|
}
|
|
|
|
|
|
|
|
let this: Vec<&NamedValue> = self.named.values().collect();
|
2020-12-18 08:53:49 +01:00
|
|
|
let that: Vec<&NamedValue> = other.named.values().collect();
|
2020-04-13 09:59:57 +02:00
|
|
|
|
|
|
|
this.cmp(&that)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-06 09:16:14 +02:00
|
|
|
impl NamedArguments {
|
|
|
|
pub fn new() -> NamedArguments {
|
|
|
|
Default::default()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn iter(&self) -> impl Iterator<Item = (&String, &NamedValue)> {
|
|
|
|
self.named.iter()
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get(&self, name: &str) -> Option<&NamedValue> {
|
|
|
|
self.named.get(name)
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_empty(&self) -> bool {
|
|
|
|
self.named.is_empty()
|
|
|
|
}
|
2020-10-27 08:37:35 +01:00
|
|
|
|
|
|
|
pub fn has_it_usage(&self) -> bool {
|
|
|
|
self.iter().any(|x| x.1.has_it_usage())
|
|
|
|
}
|
2020-12-18 08:53:49 +01:00
|
|
|
|
|
|
|
pub fn get_free_variables(&self, known_variables: &mut Vec<String>) -> Vec<String> {
|
|
|
|
let mut free_variables = vec![];
|
|
|
|
for (_, val) in self.named.iter() {
|
|
|
|
free_variables.extend(val.get_free_variables(known_variables));
|
|
|
|
}
|
|
|
|
free_variables
|
|
|
|
}
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
impl NamedArguments {
|
|
|
|
pub fn insert_switch(&mut self, name: impl Into<String>, switch: Option<Flag>) {
|
|
|
|
let name = name.into();
|
|
|
|
trace!("Inserting switch -- {} = {:?}", name, switch);
|
|
|
|
|
|
|
|
match switch {
|
|
|
|
None => self.named.insert(name, NamedValue::AbsentSwitch),
|
|
|
|
Some(flag) => self
|
|
|
|
.named
|
|
|
|
.insert(name, NamedValue::PresentSwitch(flag.name)),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn insert_optional(
|
|
|
|
&mut self,
|
|
|
|
name: impl Into<String>,
|
|
|
|
flag_span: Span,
|
|
|
|
expr: Option<SpannedExpression>,
|
|
|
|
) {
|
|
|
|
match expr {
|
|
|
|
None => self.named.insert(name.into(), NamedValue::AbsentValue),
|
|
|
|
Some(expr) => self
|
|
|
|
.named
|
2020-12-18 08:53:49 +01:00
|
|
|
.insert(name.into(), NamedValue::Value(flag_span, Box::new(expr))),
|
2020-04-06 09:16:14 +02:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn insert_mandatory(
|
|
|
|
&mut self,
|
|
|
|
name: impl Into<String>,
|
|
|
|
flag_span: Span,
|
|
|
|
expr: SpannedExpression,
|
|
|
|
) {
|
|
|
|
self.named
|
2020-12-18 08:53:49 +01:00
|
|
|
.insert(name.into(), NamedValue::Value(flag_span, Box::new(expr)));
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn switch_present(&self, switch: &str) -> bool {
|
|
|
|
self.named
|
|
|
|
.get(switch)
|
2020-07-15 02:11:41 +02:00
|
|
|
.map(|t| matches!(t, NamedValue::PresentSwitch(_)))
|
2020-04-06 09:16:14 +02:00
|
|
|
.unwrap_or(false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for NamedArguments {
    /// Context-aware rendering: space-separated `key=value` pairs.
    fn refined_pretty_debug(&self, refine: PrettyDebugRefineKind, source: &str) -> DebugDocBuilder {
        match refine {
            PrettyDebugRefineKind::ContextFree => self.pretty_debug(source),
            PrettyDebugRefineKind::WithContext => DbgDocBldr::intersperse(
                self.named.iter().map(|(key, value)| {
                    DbgDocBldr::key(key)
                        + DbgDocBldr::equals()
                        + value.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source)
                }),
                DbgDocBldr::space(),
            ),
        }
    }

    /// Standalone rendering: the contextual form wrapped in parentheses.
    fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
        DbgDocBldr::delimit(
            "(",
            self.refined_pretty_debug(PrettyDebugRefineKind::WithContext, source),
            ")",
        )
    }
}
|
|
|
|
|
|
|
|
/// Whether a flag was written in long (`--name`) or short (`-n`) form.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum FlagKind {
    /// Short form, prefixed with `-`.
    Shorthand,
    /// Long form, prefixed with `--`.
    Longhand,
}
|
|
|
|
|
|
|
|
/// A flag token as it appeared in the source.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, new)]
pub struct Flag {
    // Long or short form (determines the `--`/`-` prefix when rendered).
    pub(crate) kind: FlagKind,
    // Span of the flag's name in the source text.
    pub(crate) name: Span,
}
|
|
|
|
|
|
|
|
impl PrettyDebugWithSource for Flag {
|
|
|
|
fn pretty_debug(&self, source: &str) -> DebugDocBuilder {
|
|
|
|
let prefix = match self.kind {
|
2021-01-29 14:43:35 +01:00
|
|
|
FlagKind::Longhand => DbgDocBldr::description("--"),
|
|
|
|
FlagKind::Shorthand => DbgDocBldr::description("-"),
|
2020-04-06 09:16:14 +02:00
|
|
|
};
|
|
|
|
|
2021-01-29 14:43:35 +01:00
|
|
|
prefix + DbgDocBldr::description(self.name.slice(source))
|
2020-04-06 09:16:14 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Flag {
|
|
|
|
pub fn color(&self, span: impl Into<Span>) -> Spanned<FlatShape> {
|
|
|
|
match self.kind {
|
|
|
|
FlagKind::Longhand => FlatShape::Flag.spanned(span.into()),
|
|
|
|
FlagKind::Shorthand => FlatShape::ShorthandFlag.spanned(span.into()),
|
|
|
|
}
|
|
|
|
}
|
2019-06-22 03:36:57 +02:00
|
|
|
}
|