forked from extern/nushell
standard library: bring the tests into the main CI (#8525)
Should close one of the tasks in #8450. # Description > **Note** > in order of appearance in the global diff -1b7497c419
adds the `std-tests` job to the CI which 1. installs `nushell` in the runner 2. run the `tests.nu` module > see `open .github/workflows/ci.yml | get jobs.std-tests | to yaml` - [`ec85b6fd`..`9c122115`](ec85b6fd3fc004cd94e3fada5c8e5fe2714fd629..9c12211564ca8ee90ed65ae45776dccb8f8e4ef1) is where all the magic happens => see below - 🧪799c7eb7fd
introduces some bugs and failing test to see how the CI behaves => see how the [tests failed](https://github.com/nushell/nushell/actions/runs/4460098237/jobs/7833018256) as expected ❌ - 🧪 andc3de1fafb5
reverts the failing tests, i.e. the previous commit, leaving a standard library whose tests all pass 🎉 => see the [tests passing](https://github.com/nushell/nushell/actions/runs/4460153434/jobs/7833110719?pr=8525#step:5:1) now ✔️ ## the changes to the runner > see [`ec85b6fd`..`9c122115`](ec85b6fd3fc004cd94e3fada5c8e5fe2714fd629..9c12211564ca8ee90ed65ae45776dccb8f8e4ef1) the issue with the previous runner was the following: the clever trick of using `nu -c "use ...; test"` did print the errors when occurring but did not capture the true "failure", i.e. in all cases the `$env.LAST_EXIT_CODE` was set to `0`, never stopping the CI when a test failed 🤔 i first tried to `try` / `catch` the error inec85b6fd3f
which kinda worked but only threw a single error, the first one. i thought it was not the best and started thinking about a solution to have a complete report of all failing tests, at once, to avoid running the CI multiple times! the easiest solution i found was the one i implemented in9c12211564
> **Warning** > this changes the structure of the runner quite a bit, but the `for` loops were annoying to manipulate structured data and allow the runner to draw a complete report... now the runner does the following - compute the list of all available tests in a table with the `file`, `module` and `name` columns (first part of the pipe until `flatten` and `rename`) - run the tests one by one computing the new `pass` column - with a `log info` - captures the failing ones => puts `true` in `pass` if the test passes, `false` otherwise - if at least one test has failed, throw a single error with the list of failing tests ### hope you'll like it 😌 # User-Facing Changes ``` $nothing ``` # Tests + Formatting the standard tests now return a true error that will stop the CI # After Submitting ``` $nothing ```
This commit is contained in:
parent
6872d2ac2a
commit
0567407f85
18
.github/workflows/ci.yml
vendored
18
.github/workflows/ci.yml
vendored
@ -82,9 +82,9 @@ jobs:
|
||||
- name: Tests
|
||||
run: cargo test --workspace --profile ci --exclude nu_plugin_* ${{ matrix.flags }}
|
||||
|
||||
python-virtualenv:
|
||||
std-lib-and-python-virtualenv:
|
||||
env:
|
||||
NUSHELL_CARGO_TARGET: ci
|
||||
NU_LOG_LEVEL: DEBUG
|
||||
|
||||
strategy:
|
||||
fail-fast: true
|
||||
@ -104,7 +104,19 @@ jobs:
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1.4.4
|
||||
|
||||
- name: Install Nushell
|
||||
run: cargo install --locked --path=. --profile ci --no-default-features
|
||||
# prior to [*standard library: bring the tests into the main CI*](#8525)
|
||||
# there was a `--profile ci` here in the `cargo install`, as well as
|
||||
# `NUSHELL_CARGO_TARGET: ci` in the prelude above.
|
||||
#
|
||||
# this caused a "stackoverflow" error in the CI on windows,
|
||||
# see [this failing job](https://github.com/nushell/nushell/actions/runs/4512034615/jobs/7944945590)
|
||||
#
|
||||
# the CI profile has been removed in 00b820de9021227d1910a9ea388297ee7aee308e
|
||||
# as part of #8525.
|
||||
run: cargo install --path . --locked --no-default-features
|
||||
|
||||
- name: Standard library tests
|
||||
run: nu crates/nu-utils/standard_library/tests.nu
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v4
|
||||
|
@ -1,31 +1,23 @@
|
||||
use std.nu *
|
||||
|
||||
def collect-modules [
|
||||
path: path,
|
||||
module?: string
|
||||
] {
|
||||
let tests_path = ($path | default $env.FILE_PWD)
|
||||
let module_search = ($module | default "test_*")
|
||||
(ls ($tests_path | path join $"**/($module_search).nu") -f | get name)
|
||||
}
|
||||
# show a test record in a pretty way
|
||||
#
|
||||
# `$in` must be a `record<file: string, module: string, name: string, pass: bool>`.
|
||||
#
|
||||
# the output would be like
|
||||
# - "<indentation> x <module> <test>" all in red if failed
|
||||
# - "<indentation> <module> <test>" all in green if passed
|
||||
def show-pretty-test [indent: int = 4] {
|
||||
let test = $in
|
||||
|
||||
def collect-commands [
|
||||
test_file: string,
|
||||
module_name: string,
|
||||
command?: string
|
||||
] {
|
||||
let commands = (
|
||||
nu -c $'use ($test_file) *; $nu.scope.commands | select name module_name | to nuon'
|
||||
| from nuon
|
||||
| where module_name == $module_name
|
||||
| where ($it.name | str starts-with "test_")
|
||||
| get name
|
||||
)
|
||||
if $command == null {
|
||||
$commands
|
||||
} else {
|
||||
$commands | where $it == $command
|
||||
}
|
||||
[
|
||||
(" " * $indent)
|
||||
(if $test.pass { ansi green } else { ansi red})
|
||||
(if $test.pass { " " } else { char failed})
|
||||
" "
|
||||
$"($test.module) ($test.name)"
|
||||
(ansi reset)
|
||||
] | str join
|
||||
}
|
||||
|
||||
# Test executor
|
||||
@ -35,22 +27,61 @@ def main [
|
||||
--path: path, # Path to look for tests. Default: directory of this file.
|
||||
--module: string, # Module to run tests. Default: all test modules found.
|
||||
--command: string, # Test command to run. Default: all test command found in the files.
|
||||
--list, # Do not run any tests, just list them (dry run)
|
||||
--list, # list the selected tests without running them.
|
||||
] {
|
||||
let dry_run = ($list | default false)
|
||||
for test_file in (collect-modules $path $module) {
|
||||
let $module_name = ($test_file | path parse).stem
|
||||
|
||||
log info $"Run tests in ($module_name)"
|
||||
let tests = (collect-commands $test_file $module_name $command)
|
||||
|
||||
for test_case in $tests {
|
||||
log debug $"Run test ($module_name) ($test_case)"
|
||||
if $dry_run {
|
||||
continue
|
||||
}
|
||||
|
||||
nu -c $'use ($test_file) ($test_case); ($test_case)'
|
||||
let tests = (
|
||||
ls ($path | default $env.FILE_PWD | path join "test_*.nu")
|
||||
| each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
|
||||
| upsert test {|module|
|
||||
nu -c $'use ($module.file) *; $nu.scope.commands | select name module_name | to nuon'
|
||||
| from nuon
|
||||
| where module_name == $module.name
|
||||
| where ($it.name | str starts-with "test_")
|
||||
| get name
|
||||
}
|
||||
| flatten
|
||||
| rename file module name
|
||||
)
|
||||
|
||||
let tests_to_run = (if not ($command | is-empty) {
|
||||
$tests | where name == $command
|
||||
} else if not ($module | is-empty) {
|
||||
$tests | where module == $module
|
||||
} else {
|
||||
$tests
|
||||
})
|
||||
|
||||
if $list {
|
||||
return ($tests_to_run | select module name file)
|
||||
}
|
||||
|
||||
let tests = (
|
||||
$tests_to_run
|
||||
| group-by module
|
||||
| transpose name tests
|
||||
| each {|module|
|
||||
log info $"Running tests in ($module.name)"
|
||||
$module.tests | each {|test|
|
||||
log debug $"Running test ($test.name)"
|
||||
let did_pass = (try {
|
||||
nu -c $'use ($test.file) ($test.name); ($test.name)'
|
||||
true
|
||||
} catch { false })
|
||||
|
||||
$test | merge ({pass: $did_pass})
|
||||
}
|
||||
}
|
||||
| flatten
|
||||
)
|
||||
|
||||
if not ($tests | where not pass | is-empty) {
|
||||
let text = ([
|
||||
$"(ansi purple)some tests did not pass (char lparen)see complete errors above(char rparen):(ansi reset)"
|
||||
""
|
||||
($tests | each {|test| ($test | show-pretty-test 4)} | str join "\n")
|
||||
""
|
||||
] | str join "\n")
|
||||
|
||||
error make --unspanned { msg: $text }
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user