History, more test coverage improvements, and refactorings. (#3217)

Improvements overall to Nu. Among the changes here, we can also be more confident about incorporating `3041`. End-to-end tests checking that environment variables are properly exported to externals are not added here (since they're in the other PR).

A few things added in this PR (probably forgetting some, too):

* no writes happen to history during test runs.
* end-to-end coverage added for environment syncing.
* clean-ups / refactorings in a few areas.
* testing API for finer control (tests can pass more than one pipeline); see the sketch after this list.
* tests can pass environment variables that nu will inherit when running.

* No longer needed.

* No longer under a module, so no need to use `super`.
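
As a rough illustration of the kind of end-to-end check this enables, here is a minimal sketch that shells out to `nu -c` with several pipelines and extra environment variables the child process inherits. The helper `run_nu` and the `FOO` variable are hypothetical, not the API added by this PR; the real test helpers live in `nu-test-support` and may differ.

```rust
// Hypothetical sketch only: not the exact testing API added by this PR.
use std::collections::HashMap;
use std::process::Command;

/// Run `nu` with several pipelines (joined into one script) and extra
/// environment variables that the spawned nu process inherits.
fn run_nu(pipelines: &[&str], env: &HashMap<String, String>) -> std::io::Result<String> {
    let script = pipelines.join("; ");
    let output = Command::new("nu").arg("-c").arg(&script).envs(env).output()?;
    Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
}

#[test]
fn env_var_is_visible_to_nu() -> std::io::Result<()> {
    let mut env = HashMap::new();
    env.insert("FOO".to_string(), "bar".to_string());

    // Two pipelines in one run; the second reads the inherited variable
    // ($nu.env.FOO is how environment variables were read in nu at the time of this PR).
    let out = run_nu(&["echo hello", "echo $nu.env.FOO"], &env)?;
    assert!(out.contains("bar"));
    Ok(())
}
```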
Andrés N. Robalino, 2021-03-27 00:08:03 -05:00, committed by GitHub
parent b243b3ee1d, commit 8fc8fc89aa
31 changed files with 483 additions and 241 deletions


@@ -509,7 +509,7 @@ mod tests {
     #[cfg(feature = "which")]
     use futures::executor::block_on;
     #[cfg(feature = "which")]
-    use nu_engine::basic_evaluation_context;
+    use nu_engine::EvaluationContext;
     #[cfg(feature = "which")]
     use nu_errors::ShellError;
     #[cfg(feature = "which")]
@@ -534,7 +534,7 @@ mod tests {
         let input = InputStream::empty();
         let mut ctx =
-            basic_evaluation_context().expect("There was a problem creating a basic context.");
+            EvaluationContext::basic().expect("There was a problem creating a basic context.");
         assert!(
             run_external_command(cmd, &mut ctx, input, ExternalRedirection::Stdout)
@@ -548,7 +548,7 @@
     //     async fn failure_run() -> Result<(), ShellError> {
     //     let cmd = ExternalBuilder::for_name("fail").build();
-    //     let mut ctx = crate::cli::basic_evaluation_context().expect("There was a problem creating a basic context.");
+    //     let mut ctx = crate::cli::EvaluationContext::basic().expect("There was a problem creating a basic context.");
     //     let stream = run_external_command(cmd, &mut ctx, None, false)
     //         .await?
     //         .expect("There was a problem running the external command.");


@@ -2,7 +2,7 @@ use crate::prelude::*;
 use nu_engine::CommandArgs;
 use nu_engine::WholeStreamCommand;
 use nu_errors::ShellError;
-use nu_protocol::{ReturnSuccess, Signature, UntaggedValue};
+use nu_protocol::{Primitive, ReturnSuccess, Signature, UntaggedValue, Value};
 use nu_stream::OutputStream;
 pub struct Command;
@@ -22,9 +22,16 @@ impl WholeStreamCommand for Command {
     }
     async fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
-        let name_span = args.call_info.name_tag.clone();
-        let name = args.call_info.name_tag;
-        let result = nu_data::config::read(name_span, &None)?;
+        let name = args.call_info.name_tag.clone();
+        let path = match args.scope.get_var("config-path") {
+            Some(Value {
+                value: UntaggedValue::Primitive(Primitive::FilePath(path)),
+                ..
+            }) => Some(path),
+            _ => nu_data::config::default_path().ok(),
+        };
+        let result = nu_data::config::read(&name, &path)?;
         Ok(futures::stream::iter(vec![ReturnSuccess::value(
             UntaggedValue::Row(result.into()).into_value(name),


@@ -1,10 +1,9 @@
 use crate::prelude::*;
-use nu_engine::basic_evaluation_context;
 use nu_engine::whole_stream_command;
+use nu_engine::EvaluationContext;
 use std::error::Error;
 pub fn create_default_context(interactive: bool) -> Result<EvaluationContext, Box<dyn Error>> {
-    let context = basic_evaluation_context()?;
+    let context = EvaluationContext::basic()?;
     {
         use crate::commands::*;


@@ -1,6 +1,5 @@
 use crate::prelude::*;
-use nu_data::config::{Conf, NuConfig};
-use nu_engine::history_path;
+use nu_data::config::{path::history as history_path, NuConfig};
 use nu_engine::WholeStreamCommand;
 use nu_errors::ShellError;
 use nu_protocol::{ReturnSuccess, Signature, UntaggedValue};
@@ -34,7 +33,7 @@ impl WholeStreamCommand for History {
 }
 async fn history(args: CommandArgs) -> Result<OutputStream, ShellError> {
-    let config: Box<dyn Conf> = Box::new(NuConfig::new());
+    let config = NuConfig::new();
     let tag = args.call_info.name_tag.clone();
     let (Arguments { clear }, _) = args.process().await?;


@@ -6,14 +6,12 @@ mod stub_generate;
 use double_echo::Command as DoubleEcho;
 use double_ls::Command as DoubleLs;
-use stub_generate::{mock_path, Command as StubOpen};
-use nu_engine::basic_evaluation_context;
 use nu_errors::ShellError;
 use nu_parser::ParserScope;
 use nu_protocol::hir::ClassifiedBlock;
 use nu_protocol::{ShellTypeName, Value};
 use nu_source::AnchorLocation;
+use stub_generate::{mock_path, Command as StubOpen};
 use crate::commands::{
     Append, BuildString, Each, Echo, First, Get, Keep, Last, Let, Nth, Select, StrCollect, Wrap,
@@ -26,7 +24,7 @@ use futures::executor::block_on;
 pub fn test_examples(cmd: Command) -> Result<(), ShellError> {
     let examples = cmd.examples();
-    let base_context = basic_evaluation_context()?;
+    let base_context = EvaluationContext::basic()?;
     base_context.add_commands(vec![
         // Command Doubles
@@ -92,7 +90,7 @@ pub fn test_examples(cmd: Command) -> Result<(), ShellError> {
 pub fn test(cmd: impl WholeStreamCommand + 'static) -> Result<(), ShellError> {
     let examples = cmd.examples();
-    let base_context = basic_evaluation_context()?;
+    let base_context = EvaluationContext::basic()?;
     base_context.add_commands(vec![
         whole_stream_command(Echo {}),
@@ -149,7 +147,7 @@ pub fn test(cmd: impl WholeStreamCommand + 'static) -> Result<(), ShellError> {
 pub fn test_anchors(cmd: Command) -> Result<(), ShellError> {
     let examples = cmd.examples();
-    let base_context = basic_evaluation_context()?;
+    let base_context = EvaluationContext::basic()?;
     base_context.add_commands(vec![
         // Minimal restricted commands to aid in testing