mirror of
https://github.com/nushell/nushell.git
synced 2025-06-30 06:30:08 +02:00
std: refactor test-runner to no longer require tests to be exported (#9355)
# Description

Test runner now performs the following actions in order to run tests:

* The module file is opened
* A public function with a random name is added to the source code; this function calls the user-specified private function
* The modified module file is saved under a random name in $nu.temp-path
* The modified module file is imported in a subprocess, and the injected function is called by the test runner

# User-Facing Changes

<!-- List of all changes that impact the user experience here. This helps us keep track of breaking changes. -->

* Test functions no longer need to be exported
* Test functions no longer need to reside in separate test_ files
* setup and teardown renamed to before-each and after-each respectively
* before-all and after-all functions added that run before all tests in a given module. This matches the behavior of test runners used by other languages such as JUnit/TestNG or Mocha

# Tests + Formatting

# After Submitting

---------

Co-authored-by: Kamil <skelly37@protonmail.com> Co-authored-by: amtoine <stevan.antoine@gmail.com>
This commit is contained in:
@ -286,174 +286,3 @@ It's been this long since (ansi green)Nushell(ansi reset)'s first commit:
|
||||
Startup Time: ($nu.startup-time)
|
||||
"
|
||||
}
|
||||
|
||||
# show a test record in a pretty way
#
# `$in` must be a `record<module: string, test: string, result: string>`,
# where `result` is one of "pass", "skip", or anything else for a failure
# (the docstring previously described a stale `file/module/name/pass` shape
# that the code never reads).
#
# the output would be like
# - "<indentation> x <module> <test>" all in red if failed
# - "<indentation> s <module> <test>" all in yellow if skipped
# - "<indentation>   <module> <test>" all in green if passed
def show-pretty-test [indent: int = 4] {
    let test = $in

    [
        (" " * $indent)
        # color is chosen from the test result
        (match $test.result {
            "pass" => { ansi green },
            "skip" => { ansi yellow },
            _ => { ansi red }
        })
        # one-character status marker
        (match $test.result {
            "pass" => " ",
            "skip" => "s",
            _ => { char failed }
        })
        " "
        $"($test.module) ($test.test)"
        (ansi reset)
    ] | str join
}
|
||||
|
||||
# raise a spanned error with a red message
#
# `error` must be a `record<msg: string, label: string, span: record<start: int, end: int>>`.
def throw-error [error: record] {
    # build the label first so the `error make` call stays compact
    let label = {
        text: ($error.label)
        start: $error.span.start
        end: $error.span.end
    }
    error make {
        msg: $"(ansi red)($error.msg)(ansi reset)"
        label: $label
    }
}
|
||||
|
||||
# Run Nushell tests
#
# It executes exported "test_*" commands in "test_*" modules
export def 'run-tests' [
    --path: path, # Path to look for tests. Default: current directory.
    --module: string, # Test module to run. Default: all test modules found.
    --test: string, # Individual test to run. Default: all test command found in the files.
    --list, # list the selected tests without running them.
] {
    # glob matching candidate test module files, e.g. "**/test_foo.nu"
    let module_search_pattern = ('**' | path join ({
        stem: ($module | default "test_*")
        extension: nu
    } | path join))

    let path = ($path | default $env.PWD)

    # fail early when the search root does not exist
    if not ($path | path exists) {
        throw-error {
            msg: "directory_not_found"
            label: "no such directory"
            span: (metadata $path | get span)
        }
    }

    # when a specific module was requested, make sure it can be found at all
    if not ($module | is-empty) {
        try { ls ($path | path join $module_search_pattern) | null } catch {
            throw-error {
                msg: "module_not_found"
                label: $"no such module in ($path)"
                span: (metadata $module | get span)
            }
        }
    }

    # one row per test command: file, module name, test name, setup/teardown flags
    let tests = (
        ls ($path | path join $module_search_pattern)
        | each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
        | upsert commands {|module|
            # ask a fresh nu subprocess which commands the module exports
            ^$nu.current-exe -c $'use `($module.file)` *; $nu.scope.commands | select name module_name | to nuon'
            | from nuon
            | where module_name == $module.name
            | get name
        }
        | upsert test {|module| $module.commands | where ($it | str starts-with "test_") }
        | upsert setup {|module| "setup" in $module.commands }
        | upsert teardown {|module| "teardown" in $module.commands }
        | reject commands
        | flatten
        | rename file module test
    )

    # narrow the selection: single test > single module > everything
    let tests_to_run = if not ($test | is-empty) {
        $tests | where test == $test
    } else if not ($module | is-empty) {
        $tests | where module == $module
    } else {
        $tests
    }

    if $list {
        return ($tests_to_run | select module test file)
    }

    if ($tests_to_run | is-empty) {
        error make --unspanned {msg: "no test to run"}
    }

    # run each selected test in its own nu subprocess, grouped per module
    let tests = (
        $tests_to_run
        | group-by module
        | transpose name tests
        | each {|module|
            log info $"Running tests in module ($module.name)"
            $module.tests | each {|test|
                log debug $"Running test ($test.test)"

                # optional setup builds $context, otherwise an empty record
                let context_setup = if $test.setup {
                    $"use `($test.file)` setup; let context = \(setup\)"
                } else {
                    "let context = {}"
                }

                # optional teardown consumes $context after the test body
                let context_teardown = if $test.teardown {
                    $"use `($test.file)` teardown; $context | teardown"
                } else {
                    ""
                }

                let nu_script = $'
                    ($context_setup)
                    use `($test.file)` ($test.test)
                    try {
                        $context | ($test.test)
                        ($context_teardown)
                    } catch { |err|
                        ($context_teardown)
                        if $err.msg == "ASSERT:SKIP" {
                            exit 2
                        } else {
                            $err | get raw
                        }
                    }
                '
                ^$nu.current-exe -c $nu_script

                # exit code convention: 0 pass, 2 skip (ASSERT:SKIP), anything else fail
                let result = match $env.LAST_EXIT_CODE {
                    0 => "pass",
                    2 => "skip",
                    _ => "fail",
                }
                if $result == "skip" {
                    log warning $"Test case ($test.test) is skipped"
                }
                $test | merge ({result: $result})
            }
        }
        | flatten
    )

    # summarize: any failure aborts with a pretty per-test report
    if not ($tests | where result == "fail" | is-empty) {
        let text = ([
            $"(ansi purple)some tests did not pass (char lparen)see complete errors above(char rparen):(ansi reset)"
            ""
            ($tests | each {|test| ($test | show-pretty-test 4)} | str join "\n")
            ""
        ] | str join "\n")

        error make --unspanned { msg: $text }
    }
}
|
||||
|
249
crates/nu-std/std/testing.nu
Normal file
249
crates/nu-std/std/testing.nu
Normal file
@ -0,0 +1,249 @@
|
||||
use log.nu
|
||||
|
||||
|
||||
# raise a spanned error whose message is rendered in red
#
# `error` must be a `record<msg: string, label: string, span: record<start: int, end: int>>`.
def throw-error [error: record] {
    # colorize the message, then attach the caller-supplied label and span
    let message = $"(ansi red)($error.msg)(ansi reset)"
    error make {
        msg: $message
        label: {
            text: ($error.label)
            start: $error.span.start
            end: $error.span.end
        }
    }
}
|
||||
|
||||
# show a test record in a pretty way
#
# `$in` must be a `record<name: string, test: string, result: string>`,
# where `result` is one of "pass", "skip", or anything else for a failure
# (the docstring previously described a stale `file/module/name/pass` shape
# that the code never reads).
#
# the output would be like
# - "<indentation> x <module> <test>" all in red if failed
# - "<indentation> s <module> <test>" all in yellow if skipped
# - "<indentation>   <module> <test>" all in green if passed
def show-pretty-test [indent: int = 4] {
    let test = $in

    [
        (" " * $indent)
        # color is chosen from the test result
        (match $test.result {
            "pass" => { ansi green },
            "skip" => { ansi yellow },
            _ => { ansi red }
        })
        # one-character status marker
        (match $test.result {
            "pass" => " ",
            "skip" => "s",
            _ => { char failed }
        })
        " "
        $"($test.name) ($test.test)"
        (ansi reset)
    ] | str join
}
|
||||
|
||||
# list the names of the commands defined with `def` in a Nushell file
#
# parses the `--ide-ast` token stream of the file and picks the token
# immediately following each `def` keyword, i.e. the command name.
def get-commands [
    file: path
] {
    # flat list of source tokens as reported by the IDE AST
    let tokens = (^$nu.current-exe --ide-ast $file | from json | get content)
    $tokens
    | split list def
    | skip 1
    | each {|chunk| $chunk | get 0}
}
|
||||
|
||||
# run a single test command in a subprocess and return its `complete` record
#
# a public wrapper function with a random name is appended to a copy of the
# module file; the wrapper runs the rendered `before-each` snippet, pipes the
# resulting $context into the (possibly private) test command, then runs
# `after-each`.  The copy is saved next to the module, imported in a fresh
# `nu` subprocess, and removed afterwards.
#
# `test` must be a `record<file: path, before-each: string, after-each: string, test: string>`.
# subprocess exit code 2 is the "skip" convention (ASSERT:SKIP).
def run-test [
    test: record
] {
    # random names avoid collisions between concurrent or repeated runs
    let test_file_name = (random chars -l 10)
    let test_function_name = (random chars -l 10)
    let rendered_module_path = ({parent: ($test.file|path dirname), stem: $test_file_name, extension: nu}| path join)

    # FIX: the inner double quotes around ASSERT:SKIP must be escaped inside
    # this $"..." interpolation — unescaped they end the string literal early
    # and the rendered module does not parse
    let test_function = $"
export def ($test_function_name) [] {
    ($test.before-each)
    try {
        $context | ($test.test)
        ($test.after-each)
    } catch { |err|
        ($test.after-each)
        if $err.msg == \"ASSERT:SKIP\" {
            exit 2
        } else {
            $err | get raw
        }
    }
}
"
    # render the augmented module: original source plus the injected wrapper
    open $test.file
    | lines
    | append ($test_function)
    | str join (char nl)
    | save $rendered_module_path

    # import the rendered module and call the wrapper in a fresh subprocess;
    # `complete` captures exit_code/stdout/stderr instead of raising
    let result = (
        ^$nu.current-exe -c $"use ($rendered_module_path) *; ($test_function_name)|to nuon"
        | complete
    )

    rm $rendered_module_path

    return $result
}
|
||||
|
||||
# run every test of one module, honoring its before/after hooks
#
# `module` must be a record with `file`, `name`, `tests` (list of command
# names) and the boolean hook flags `before-all`, `before-each`, `after-all`,
# `after-each` as produced by `run-tests`.
#
# returns the module's test rows, each with an added `result` column
# ("pass" / "skip" / "fail").
def run-tests-for-module [
    module: record
] {
    # run before-all once; its stdout (nuon) becomes the context shared by
    # every test of this module, otherwise start from an empty record
    let global_context = if $module.before-all {
        log info $"Running before-all for module ($module.name)"
        run-test {
            file: $module.file,
            before-each: 'let context = {}',
            after-each: '',
            test: 'before-all'
        }
        | if $in.exit_code == 0 {
            $in.stdout
        } else {
            throw-error {
                msg: "Before-all failed"
                label: "Failure in test setup"
                span: (metadata $in | get span)
            }
        }
    } else {
        {}
    }

    # explode the module record into one row per test and render the
    # before-each / after-each code snippets that run-test will inject
    let tests = (
        $module
        | flatten
        | rename -c [tests test]
        | update before-each {|row|
            if $module.before-each {
                # merge the per-test context on top of the global one
                $"let context = \(($global_context)|merge \(before-each\)\)"
            } else {
                $"let context = ($global_context)"
            }
        }
        | update after-each {|row|
            if $module.after-each {
                '$context | after-each'
            } else {
                ''
            }
        }
        | each {|test|
            log info $"Running ($test.test) in module ($module.name) with context ($global_context)"
            # exit code convention: 0 pass, 2 skip (ASSERT:SKIP), else fail
            $test | insert result {|row|
                run-test $test
                | match $in.exit_code {
                    0 => "pass",
                    2 => "skip",
                    _ => "fail",
                }
            }
        }
    )

    # after-all runs once with the shared context; its outcome is not recorded
    if $module.after-all {
        log info $"Running after-all for module ($module.name)"

        run-test {
            file: $module.file,
            before-each: $"let context = ($global_context)",
            after-each: '',
            test: 'after-all'
        }
    }
    return $tests
}
|
||||
|
||||
# Run Nushell tests
#
# Discovers "test_*" commands in the ".nu" modules found under `--path` and
# runs each of them in a subprocess, honoring the optional
# before-each / after-each / before-all / after-all hooks of every module.
# Fails with a per-test report when any test does not pass.
# (the refactor had dropped this doc comment from the exported command)
export def run-tests [
    --path: path, # Path to look for tests. Default: current directory.
    --module: string, # Test module to run. Default: all test modules found.
    --test: string, # Individual test to run. Default: all test command found in the files.
    --list, # list the selected tests without running them.
] {
    # glob matching candidate module files, e.g. "**/*.nu" or "**/<module>.nu"
    let module_search_pattern = ('**' | path join ({
        stem: ($module | default "*")
        extension: nu
    } | path join))

    # default to the current directory; an explicit path must exist
    let path = if $path == null {
        $env.PWD
    } else {
        if not ($path | path exists) {
            throw-error {
                msg: "directory_not_found"
                label: "no such directory"
                span: (metadata $path | get span)
            }
        }
        $path
    }

    # when a specific module was requested, make sure it can be found at all
    if not ($module | is-empty) {
        # NOTE(review): `| null` is presumably meant to discard the listing —
        # confirm it behaves like `| ignore` on the targeted Nushell version
        try { ls ($path | path join $module_search_pattern) | null } catch {
            throw-error {
                msg: "module_not_found"
                label: $"no such module in ($path)"
                span: (metadata $module | get span)
            }
        }
    }

    # one row per module that contains at least one matching test, with
    # its test list and the boolean flags for the four lifecycle hooks
    let modules = (
        ls ($path | path join $module_search_pattern)
        | each {|row| {file: $row.name name: ($row.name | path parse | get stem)}}
        | upsert commands {|module|
            get-commands $module.file
        }
        | upsert tests {|module| $module.commands|where $it starts-with "test_"}
        | filter {|x| ($x.tests|length) > 0}
        | filter {|x| if ($test|is-empty) {true} else {$test in $x.tests}}
        | filter {|x| if ($module|is-empty) {true} else {$module == $x.name}}
        | upsert before-each {|module| "before-each" in $module.commands}
        | upsert before-all {|module| "before-all" in $module.commands}
        | upsert after-each {|module| "after-each" in $module.commands}
        | upsert after-all {|module| "after-all" in $module.commands}
        | reject commands
        | rename file name tests
        | update tests {|x|
            # narrow each module's test list when a single test was requested
            if ($test|is-empty) {
                $x.tests
            } else {
                $x.tests
                | where $it == $test
            }
        }
    )
    if $list {
        return $modules
    }

    if ($modules | is-empty) {
        error make --unspanned {msg: "no test to run"}
    }

    # run every selected module and flatten into one row per test
    let results = (
        $modules
        | each {|module|
            run-tests-for-module $module
        }
        | flatten
        | select name test result
    )
    # summarize: any failure aborts with a pretty per-test report
    if not ($results | where result == "fail" | is-empty) {
        let text = ([
            $"(ansi purple)some tests did not pass (char lparen)see complete errors below(char rparen):(ansi reset)"
            ""
            ($results | each {|test| ($test | show-pretty-test 4)} | str join "\n")
            ""
        ] | str join "\n")

        error make --unspanned { msg: $text }
    }

}
|
Reference in New Issue
Block a user