Apply nightly clippy lints (#9654)

# Description
- A new one is the removal of unnecessary `#` in raw strings without `"`
inside.
- See https://rust-lang.github.io/rust-clippy/master/index.html#/needless_raw_string_hashes
- The automatically applied removal of `.into_iter()` touched several
places that #9648 will change to use the record API. If
necessary I can remove them, @IanManske, to avoid churn with this PR.
- Manually applied `.try_fold` in two places
- Removed a dead `if`
- Manual: Combat rightward-drift with early return
This commit is contained in:
Stefan Holderbach 2023-07-12 00:00:31 +02:00 committed by GitHub
parent ad11e25fc5
commit bd0032898f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 49 additions and 63 deletions

View File

@ -59,7 +59,7 @@ impl Command for KeybindingsList {
_input: PipelineData, _input: PipelineData,
) -> Result<PipelineData, ShellError> { ) -> Result<PipelineData, ShellError> {
let records = if call.named_len() == 0 { let records = if call.named_len() == 0 {
let all_options = vec!["modifiers", "keycodes", "edits", "modes", "events"]; let all_options = ["modifiers", "keycodes", "edits", "modes", "events"];
all_options all_options
.iter() .iter()
.flat_map(|argument| get_records(argument, &call.head)) .flat_map(|argument| get_records(argument, &call.head))

View File

@ -205,10 +205,7 @@ impl Completer for CommandCompletion {
vec![] vec![]
}; };
subcommands subcommands.into_iter().chain(commands).collect::<Vec<_>>()
.into_iter()
.chain(commands.into_iter())
.collect::<Vec<_>>()
} }
fn get_sort_by(&self) -> SortBy { fn get_sort_by(&self) -> SortBy {

View File

@ -294,7 +294,7 @@ fn recursive_value(val: Value, sublevels: Vec<Vec<u8>>) -> Value {
vals, vals,
span: _, span: _,
} => { } => {
for item in cols.into_iter().zip(vals.into_iter()) { for item in cols.into_iter().zip(vals) {
// Check if index matches with sublevel // Check if index matches with sublevel
if item.0.as_bytes().to_vec() == next_sublevel { if item.0.as_bytes().to_vec() == next_sublevel {
// If matches try to fetch recursively the next // If matches try to fetch recursively the next

View File

@ -150,7 +150,7 @@ impl SQLContext {
let agg_df = df.groupby(group_by).agg(agg_projection); let agg_df = df.groupby(group_by).agg(agg_projection);
let mut final_proj_pos = groupby_pos let mut final_proj_pos = groupby_pos
.into_iter() .into_iter()
.chain(agg_proj_pos.into_iter()) .chain(agg_proj_pos)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
final_proj_pos.sort_by(|(proj_pa, _), (proj_pb, _)| proj_pa.cmp(proj_pb)); final_proj_pos.sort_by(|(proj_pa, _), (proj_pb, _)| proj_pa.cmp(proj_pb));

View File

@ -150,7 +150,7 @@ impl Command for UpdateCells {
.iter() .iter()
.map(|val| val.as_string()) .map(|val| val.as_string())
.collect::<Result<Vec<String>, ShellError>>()?; .collect::<Result<Vec<String>, ShellError>>()?;
Some(HashSet::from_iter(cols.into_iter())) Some(HashSet::from_iter(cols))
} }
None => None, None => None,
}; };
@ -197,7 +197,7 @@ impl Iterator for UpdateCellIterator {
Value::Record { vals, cols, span } => Some(Value::Record { Value::Record { vals, cols, span } => Some(Value::Record {
vals: cols vals: cols
.iter() .iter()
.zip(vals.into_iter()) .zip(vals)
.map(|(col, val)| match &self.columns { .map(|(col, val)| match &self.columns {
Some(cols) if !cols.contains(col) => val, Some(cols) if !cols.contains(col) => val,
_ => process_cell( _ => process_cell(

View File

@ -188,8 +188,7 @@ pub fn check_all_signature_input_output_types_entries_have_examples(
signature: Signature, signature: Signature,
witnessed_type_transformations: HashSet<(Type, Type)>, witnessed_type_transformations: HashSet<(Type, Type)>,
) { ) {
let declared_type_transformations = let declared_type_transformations = HashSet::from_iter(signature.input_output_types);
HashSet::from_iter(signature.input_output_types.into_iter());
assert!( assert!(
witnessed_type_transformations.is_subset(&declared_type_transformations), witnessed_type_transformations.is_subset(&declared_type_transformations),
"This should not be possible (bug in test): the type transformations \ "This should not be possible (bug in test): the type transformations \

View File

@ -67,7 +67,7 @@ impl Command for Start {
} else { } else {
// open crate does not allow opening URL without prefix // open crate does not allow opening URL without prefix
let path_with_prefix = Path::new("https://").join(&path.item); let path_with_prefix = Path::new("https://").join(&path.item);
let common_domains = vec!["com", "net", "org", "edu", "sh"]; let common_domains = ["com", "net", "org", "edu", "sh"];
if let Some(url) = path_with_prefix.to_str() { if let Some(url) = path_with_prefix.to_str() {
let url = url::Url::parse(url).map_err(|_| { let url = url::Url::parse(url).map_err(|_| {
ShellError::GenericError( ShellError::GenericError(

View File

@ -98,7 +98,7 @@ impl Command for Items {
PipelineData::Empty => Ok(PipelineData::Empty), PipelineData::Empty => Ok(PipelineData::Empty),
PipelineData::Value(Value::Record { cols, vals, .. }, ..) => Ok(cols PipelineData::Value(Value::Record { cols, vals, .. }, ..) => Ok(cols
.into_iter() .into_iter()
.zip(vals.into_iter()) .zip(vals)
.map_while(run_for_each_item) .map_while(run_for_each_item)
.into_pipeline_data(ctrlc)), .into_pipeline_data(ctrlc)),
// Errors // Errors

View File

@ -250,8 +250,6 @@ fn move_record_columns(
} }
} }
if columns.is_empty() {}
let mut out_cols: Vec<String> = Vec::with_capacity(inp_cols.len()); let mut out_cols: Vec<String> = Vec::with_capacity(inp_cols.len());
let mut out_vals: Vec<Value> = Vec::with_capacity(inp_vals.len()); let mut out_vals: Vec<Value> = Vec::with_capacity(inp_vals.len());

View File

@ -105,7 +105,7 @@ only unwrap the outer list, and leave the variable's contents untouched."#
Ok(vec Ok(vec
.into_iter() .into_iter()
.chain(input.into_iter()) .chain(input)
.into_pipeline_data(engine_state.ctrlc.clone()) .into_pipeline_data(engine_state.ctrlc.clone())
.set_metadata(metadata)) .set_metadata(metadata))
} }

View File

@ -98,7 +98,7 @@ impl Command for Zip {
Ok(input Ok(input
.into_iter() .into_iter()
.zip(other.into_pipeline_data().into_iter()) .zip(other.into_pipeline_data())
.map(move |(x, y)| Value::List { .map(move |(x, y)| Value::List {
vals: vec![x, y], vals: vec![x, y],
span: head, span: head,

View File

@ -355,7 +355,7 @@ fn parse_attributes(
vals: Vec<Value>, vals: Vec<Value>,
) -> Result<IndexMap<String, String>, ShellError> { ) -> Result<IndexMap<String, String>, ShellError> {
let mut h = IndexMap::new(); let mut h = IndexMap::new();
for (k, v) in cols.into_iter().zip(vals.into_iter()) { for (k, v) in cols.into_iter().zip(vals) {
if let Value::String { val, .. } = v { if let Value::String { val, .. } = v {
h.insert(k, val); h.insert(k, val);
} else { } else {

View File

@ -155,10 +155,13 @@ pub fn highlight_search_in_table(
}); });
}; };
let has_match = cols.iter().zip(vals.iter_mut()).fold( let has_match = cols.iter().zip(vals.iter_mut()).try_fold(
Ok(false), false,
|acc: Result<bool, ShellError>, (col, val)| { |acc: bool, (col, val)| -> Result<bool, ShellError> {
if searched_cols.contains(&col.as_str()) { if !searched_cols.contains(&col.as_str()) {
// don't search this column
return Ok(acc);
}
if let Value::String { val: s, span } = val { if let Value::String { val: s, span } = val {
if s.to_lowercase().contains(&search_string) { if s.to_lowercase().contains(&search_string) {
*val = Value::String { *val = Value::String {
@ -170,19 +173,12 @@ pub fn highlight_search_in_table(
)?, )?,
span: *span, span: *span,
}; };
Ok(true) return Ok(true);
} else { }
}
// column does not contain the searched string // column does not contain the searched string
acc
}
} else {
// ignore non-string values // ignore non-string values
acc Ok(acc)
}
} else {
// don't search this column
acc
}
}, },
)?; )?;

View File

@ -100,8 +100,8 @@ impl Command for SubCommand {
let url_components = cols let url_components = cols
.iter() .iter()
.zip(vals.iter()) .zip(vals.iter())
.fold(Ok(UrlComponents::new()), |url, (k, v)| { .try_fold(UrlComponents::new(), |url, (k, v)| {
url?.add_component(k.clone(), v.clone(), span) url.add_component(k.clone(), v.clone(), span)
}); });
url_components?.to_url(span) url_components?.to_url(span)

View File

@ -515,12 +515,12 @@ impl Command for AnsiCommand {
) )
.switch( .switch(
"escape", // \x1b[ "escape", // \x1b[
r#"escape sequence without the escape character(s) ('\x1b[' is not required)"#, r"escape sequence without the escape character(s) ('\x1b[' is not required)",
Some('e'), Some('e'),
) )
.switch( .switch(
"osc", // \x1b] "osc", // \x1b]
r#"operating system command (osc) escape sequence without the escape character(s) ('\x1b]' is not required)"#, r"operating system command (osc) escape sequence without the escape character(s) ('\x1b]' is not required)",
Some('o'), Some('o'),
) )
.switch("list", "list available ansi code names", Some('l')) .switch("list", "list available ansi code names", Some('l'))

View File

@ -128,12 +128,12 @@ impl Command for SubCommand {
}, },
Example { Example {
description: "Find and replace contents without using the replace parameter as a regular expression", description: "Find and replace contents without using the replace parameter as a regular expression",
example: r#"'dogs_$1_cats' | str replace '\$1' '$2' -n"#, example: r"'dogs_$1_cats' | str replace '\$1' '$2' -n",
result: Some(Value::test_string("dogs_$2_cats")), result: Some(Value::test_string("dogs_$2_cats")),
}, },
Example { Example {
description: "Find and replace the first occurrence using string replacement *not* regular expressions", description: "Find and replace the first occurrence using string replacement *not* regular expressions",
example: r#"'c:\some\cool\path' | str replace 'c:\some\cool' '~' -s"#, example: r"'c:\some\cool\path' | str replace 'c:\some\cool' '~' -s",
result: Some(Value::test_string("~\\path")), result: Some(Value::test_string("~\\path")),
}, },
Example { Example {
@ -148,7 +148,7 @@ impl Command for SubCommand {
}, },
Example { Example {
description: "Find and replace with fancy-regex", description: "Find and replace with fancy-regex",
example: r#"'a successful b' | str replace '\b([sS])uc(?:cs|s?)e(ed(?:ed|ing|s?)|ss(?:es|ful(?:ly)?|i(?:ons?|ve(?:ly)?)|ors?)?)\b' '${1}ucce$2'"#, example: r"'a successful b' | str replace '\b([sS])uc(?:cs|s?)e(ed(?:ed|ing|s?)|ss(?:es|ful(?:ly)?|i(?:ons?|ve(?:ly)?)|ors?)?)\b' '${1}ucce$2'",
result: Some(Value::test_string("a successful b")), result: Some(Value::test_string("a successful b")),
}, },
Example { Example {

View File

@ -114,7 +114,7 @@ prints out the list properly."#
// dbg!("value::record"); // dbg!("value::record");
let mut items = vec![]; let mut items = vec![];
for (i, (c, v)) in cols.into_iter().zip(vals.into_iter()).enumerate() { for (i, (c, v)) in cols.into_iter().zip(vals).enumerate() {
items.push((i, c, v.into_string(", ", config))) items.push((i, c, v.into_string(", ", config)))
} }

View File

@ -4023,7 +4023,7 @@ fn parse_table_expression(working_set: &mut StateWorkingSet, span: Span) -> Expr
let ty = if working_set.parse_errors.len() == errors { let ty = if working_set.parse_errors.len() == errors {
let (ty, errs) = table_type(&head, &rows); let (ty, errs) = table_type(&head, &rows);
working_set.parse_errors.extend(errs.into_iter()); working_set.parse_errors.extend(errs);
ty ty
} else { } else {
Type::Table(vec![]) Type::Table(vec![])
@ -5039,7 +5039,7 @@ pub fn parse_expression(
String::from_utf8(bytes) String::from_utf8(bytes)
.expect("builtin commands bytes should be able to convert to string"), .expect("builtin commands bytes should be able to convert to string"),
String::from_utf8_lossy(match spans.len() { String::from_utf8_lossy(match spans.len() {
1 | 2 | 3 => b"value", 1..=3 => b"value",
_ => working_set.get_span_contents(spans[3]), _ => working_set.get_span_contents(spans[3]),
}) })
.to_string(), .to_string(),

View File

@ -64,11 +64,7 @@ impl RawStream {
pub fn chain(self, stream: RawStream) -> RawStream { pub fn chain(self, stream: RawStream) -> RawStream {
RawStream { RawStream {
stream: Box::new(self.stream.chain(stream.stream)), stream: Box::new(self.stream.chain(stream.stream)),
leftover: self leftover: self.leftover.into_iter().chain(stream.leftover).collect(),
.leftover
.into_iter()
.chain(stream.leftover.into_iter())
.collect(),
ctrlc: self.ctrlc, ctrlc: self.ctrlc,
is_binary: self.is_binary, is_binary: self.is_binary,
span: self.span, span: self.span,

View File

@ -8,7 +8,7 @@ fn infers_types() {
Playground::setup("filter_from_vcf_test_1", |dirs, sandbox| { Playground::setup("filter_from_vcf_test_1", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContentToBeTrimmed( sandbox.with_files(vec![FileWithContentToBeTrimmed(
"contacts.vcf", "contacts.vcf",
r#" r"
BEGIN:VCARD BEGIN:VCARD
VERSION:3.0 VERSION:3.0
FN:John Doe FN:John Doe
@ -29,7 +29,7 @@ fn infers_types() {
TEL;TYPE=CELL:(890) 123-4567 TEL;TYPE=CELL:(890) 123-4567
CATEGORIES:Band,myContacts CATEGORIES:Band,myContacts
END:VCARD END:VCARD
"#, ",
)]); )]);
let cwd = dirs.test(); let cwd = dirs.test();
@ -48,7 +48,7 @@ fn from_vcf_text_to_table() {
Playground::setup("filter_from_vcf_test_2", |dirs, sandbox| { Playground::setup("filter_from_vcf_test_2", |dirs, sandbox| {
sandbox.with_files(vec![FileWithContentToBeTrimmed( sandbox.with_files(vec![FileWithContentToBeTrimmed(
"contacts.txt", "contacts.txt",
r#" r"
BEGIN:VCARD BEGIN:VCARD
VERSION:3.0 VERSION:3.0
FN:John Doe FN:John Doe
@ -62,7 +62,7 @@ fn from_vcf_text_to_table() {
NOTE:Facebook: john.doe.3\nWebsite: \nHometown: Cleveland\, Ohio NOTE:Facebook: john.doe.3\nWebsite: \nHometown: Cleveland\, Ohio
CATEGORIES:myContacts CATEGORIES:myContacts
END:VCARD END:VCARD
"#, ",
)]); )]);
let cwd = dirs.test(); let cwd = dirs.test();