std run-tests: Rename --command switch to --test; and likewise in --list output (#8895)

# Description
<!--
Thank you for improving Nushell. Please, check our [contributing
guide](../CONTRIBUTING.md) and talk to the core team before making major
changes.

Description of your pull request goes here. **Provide examples and/or
screenshots** if your changes affect the user experience.
-->
As described in #8893, the `std run-tests --module` switch is ambiguous and
`--command` is misleading. Fix that by renaming `--command` to `--test`, and
rename the corresponding `--list` output column to match.

# User-Facing Changes
<!-- List of all changes that impact the user experience here. This
helps us keep track of breaking changes. -->
* `std run-tests --command test_foo_1` becomes `std run-tests --test test_foo_1`; users may have to update existing scripts (or muscle memory). A short before/after sketch follows this list.
* The `name` column in the `--list` output is renamed to `test`:
  ```
  〉std run-tests --list | columns
  ╭───┬────────╮
  │ 0 │ module │
  │ 1 │ name   │
  │ 2 │ file   │
  ╰───┴────────╯
  ```
  Changes to:
  
  ```
  〉std run-tests --list | columns
  ╭───┬────────╮
  │ 0 │ module │
  │ 1 │ test   │
  │ 2 │ file   │
  ╰───┴────────╯
  
  ```
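For anyone updating scripts, here is a hedged before/after sketch. The test name `test_foo_1` is the same placeholder used above, and the `where` filter is just one illustrative way to use the renamed column; it assumes `use std` has already been run.

```
# Before this change:
std run-tests --command test_foo_1

# After this change:
std run-tests --test test_foo_1

# The --list table now exposes the test name under `test`, so filters change too:
std run-tests --list | where test == "test_foo_1"
```
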
# Tests + Formatting
<!--
Don't forget to add tests that cover your changes.

Make sure you've run and fixed any issues with these commands:

- `cargo fmt --all -- --check` to check standard code formatting (`cargo
fmt --all` applies these changes)
- `cargo clippy --workspace -- -D warnings -D clippy::unwrap_used -A
clippy::needless_collect` to check that you're using the standard code
style
- `cargo test --workspace` to check that all tests pass
- `cargo run -- crates/nu-std/tests/run.nu` to run the tests for the
standard library

> **Note**
> from `nushell` you can also use the `toolkit` as follows
> ```bash
> use toolkit.nu # or use an `env_change` hook to activate it automatically
> toolkit check pr
> ```
-->

# After Submitting
<!-- If your PR had any user-facing changes, update [the
documentation](https://github.com/nushell/nushell.github.io) after the
PR is merged, if necessary. This will help us keep the docs up to date.
-->
Commit 24b4ac692e (parent fb72da0e82), authored by Bob Hyman, 2023-04-20 20:41:33 -04:00, committed by GitHub.

```
@@ -240,7 +240,7 @@ def show-pretty-test [indent: int = 4] {
             _ => { char failed }
         })
         " "
-        $"($test.module) ($test.name)"
+        $"($test.module) ($test.test)"
         (ansi reset)
     ] | str join
 }
@@ -261,8 +261,8 @@ def throw-error [error: record] {
 # It executes exported "test_*" commands in "test_*" modules
 export def 'run-tests' [
     --path: path, # Path to look for tests. Default: current directory.
-    --module: string, # Module to run tests. Default: all test modules found.
-    --command: string, # Test command to run. Default: all test command found in the files.
+    --module: string, # Test module to run. Default: all test modules found.
+    --test: string, # Individual test to run. Default: all test command found in the files.
     --list, # list the selected tests without running them.
 ] {
     let module_search_pattern = ('**' | path join ({
@@ -304,11 +304,11 @@ export def 'run-tests' [
         | upsert teardown {|module| "teardown" in $module.commands }
         | reject commands
         | flatten
-        | rename file module name
+        | rename file module test
     )

-    let tests_to_run = (if not ($command | is-empty) {
-        $tests | where name == $command
+    let tests_to_run = (if not ($test | is-empty) {
+        $tests | where test == $test
     } else if not ($module | is-empty) {
         $tests | where module == $module
     } else {
@@ -316,7 +316,7 @@ export def 'run-tests' [
     })

     if $list {
-        return ($tests_to_run | select module name file)
+        return ($tests_to_run | select module test file)
     }

     if ($tests_to_run | is-empty) {
@@ -328,9 +328,9 @@ export def 'run-tests' [
         | group-by module
         | transpose name tests
         | each {|module|
-            log info $"Running tests in ($module.name)"
+            log info $"Running tests in module ($module.name)"
             $module.tests | each {|test|
-                log debug $"Running test ($test.name)"
+                log debug $"Running test ($test.test)"

                 let context_setup = if $test.setup {
                     $"use `($test.file)` setup; let context = \(setup\)"
@@ -346,9 +346,9 @@ export def 'run-tests' [
                 let nu_script = $'
                     ($context_setup)
-                    use `($test.file)` ($test.name)
+                    use `($test.file)` ($test.test)

                     try {
-                        $context | ($test.name)
+                        $context | ($test.test)
                         ($context_teardown)
                     } catch { |err|
                         ($context_teardown)
@@ -367,7 +367,7 @@ export def 'run-tests' [
                     _ => "fail",
                 }
                 if $result == "skip" {
-                    log warning $"Test case ($test.name) is skipped"
+                    log warning $"Test case ($test.test) is skipped"
                 }
                 $test | merge ({result: $result})
             }
```
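To make the setup/teardown and test-selection wiring above concrete, below is a minimal sketch of a test module the runner could discover. The file name, record fields, and command bodies are illustrative assumptions, not part of this PR; only the exported `test_*`/`setup`/`teardown` naming convention comes from the runner itself, and this reflects my reading of the diff (setup runs before each test and its return value is piped into the test as the context).

```
# tests/test_example.nu  (hypothetical module; any exported test_* command is discovered)

# Optional: runs before each test; its return value becomes the context piped into the test.
export def setup [] {
    {answer: 42}
}

# Optional: runs after each test, receiving the same context.
export def teardown [] {
    let context = $in
    print $"tearing down, context was ($context)"
}

# Run just this test with the renamed switch:
#   std run-tests --path tests --module test_example --test test_answer
export def test_answer [] {
    let context = $in
    if $context.answer != 42 {
        error make {msg: "unexpected context"}
    }
}
```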