From be49547b446cba240f8f2a9592284e77d4a6896f Mon Sep 17 00:00:00 2001 From: Aleksey Kladov Date: Thu, 30 Jul 2020 22:19:58 +0200 Subject: Use split_once polyfill --- crates/ra_project_model/src/cfg_flag.rs | 4 ++-- crates/rust-analyzer/src/cli/analysis_bench.rs | 21 ++++++++++----------- crates/rust-analyzer/src/handlers.rs | 4 ++-- crates/stdx/src/lib.rs | 15 ++++++++++++--- crates/test_utils/src/fixture.rs | 8 ++++---- 5 files changed, 30 insertions(+), 22 deletions(-) (limited to 'crates') diff --git a/crates/ra_project_model/src/cfg_flag.rs b/crates/ra_project_model/src/cfg_flag.rs index 1bc5d4832..bd50056c6 100644 --- a/crates/ra_project_model/src/cfg_flag.rs +++ b/crates/ra_project_model/src/cfg_flag.rs @@ -4,7 +4,7 @@ use std::str::FromStr; use ra_cfg::CfgOptions; -use stdx::split_delim; +use stdx::split_once; #[derive(Clone, Eq, PartialEq, Debug)] pub enum CfgFlag { @@ -15,7 +15,7 @@ pub enum CfgFlag { impl FromStr for CfgFlag { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { - let res = match split_delim(s, '=') { + let res = match split_once(s, '=') { Some((key, value)) => { if !(value.starts_with('"') && value.ends_with('"')) { return Err(format!("Invalid cfg ({:?}), value should be in quotes", s)); diff --git a/crates/rust-analyzer/src/cli/analysis_bench.rs b/crates/rust-analyzer/src/cli/analysis_bench.rs index 076184ad6..13a106638 100644 --- a/crates/rust-analyzer/src/cli/analysis_bench.rs +++ b/crates/rust-analyzer/src/cli/analysis_bench.rs @@ -2,7 +2,7 @@ use std::{env, path::Path, str::FromStr, sync::Arc, time::Instant}; -use anyhow::{format_err, Result}; +use anyhow::{bail, format_err, Result}; use ra_db::{ salsa::{Database, Durability}, FileId, @@ -30,19 +30,18 @@ pub struct Position { impl FromStr for Position { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { - let (path_line, column) = rsplit_at_char(s, ':')?; - let (path, line) = rsplit_at_char(path_line, ':')?; - let path = env::current_dir().unwrap().join(path); - let path = AbsPathBuf::assert(path); - Ok(Position { path, line: line.parse()?, column: column.parse()? }) + let mut split = s.rsplitn(3, ':'); + match (split.next(), split.next(), split.next()) { + (Some(column), Some(line), Some(path)) => { + let path = env::current_dir().unwrap().join(path); + let path = AbsPathBuf::assert(path); + Ok(Position { path, line: line.parse()?, column: column.parse()?
}) + } + _ => bail!("position should be in file:line:column format: {:?}", s), + } } } -fn rsplit_at_char(s: &str, c: char) -> Result<(&str, &str)> { - let idx = s.rfind(c).ok_or_else(|| format_err!("no `{}` in {}", c, s))?; - Ok((&s[..idx], &s[idx + 1..])) -} - pub fn analysis_bench( verbosity: Verbosity, path: &Path, diff --git a/crates/rust-analyzer/src/handlers.rs b/crates/rust-analyzer/src/handlers.rs index 62ed6880b..e73b3a211 100644 --- a/crates/rust-analyzer/src/handlers.rs +++ b/crates/rust-analyzer/src/handlers.rs @@ -26,7 +26,7 @@ use ra_project_model::TargetKind; use ra_syntax::{algo, ast, AstNode, SyntaxKind, TextRange, TextSize}; use serde::{Deserialize, Serialize}; use serde_json::to_value; -use stdx::{format_to, split_delim}; +use stdx::{format_to, split_once}; use crate::{ cargo_target_spec::CargoTargetSpec, @@ -865,7 +865,7 @@ pub(crate) fn handle_resolve_code_action( .map(|it| it.into_iter().filter_map(from_proto::assist_kind).collect()); let assists = snap.analysis.resolved_assists(&snap.config.assist, frange)?; - let (id_string, index) = split_delim(&params.id, ':').unwrap(); + let (id_string, index) = split_once(&params.id, ':').unwrap(); let index = index.parse::<usize>().unwrap(); let assist = &assists[index]; assert!(assist.assist.id.0 == id_string); diff --git a/crates/stdx/src/lib.rs b/crates/stdx/src/lib.rs index ea0e6b949..b65875c96 100644 --- a/crates/stdx/src/lib.rs +++ b/crates/stdx/src/lib.rs @@ -109,9 +109,18 @@ pub fn replace(buf: &mut String, from: char, to: &str) { *buf = buf.replace(from, to) } -pub fn split_delim(haystack: &str, delim: char) -> Option<(&str, &str)> { - let idx = haystack.find(delim)?; - Some((&haystack[..idx], &haystack[idx + delim.len_utf8()..])) +// https://github.com/rust-lang/rust/issues/74773 +pub fn split_once(haystack: &str, delim: char) -> Option<(&str, &str)> { + let mut split = haystack.splitn(2, delim); + let prefix = split.next()?; + let suffix = split.next()?; + Some((prefix, suffix)) +} +pub fn rsplit_once(haystack: &str, delim: char) -> Option<(&str, &str)> { + let mut split = haystack.rsplitn(2, delim); + let suffix = split.next()?; + let prefix = split.next()?; + Some((prefix, suffix)) } pub fn trim_indent(mut text: &str) -> String { diff --git a/crates/test_utils/src/fixture.rs b/crates/test_utils/src/fixture.rs index ed764046b..e40b61a94 100644 --- a/crates/test_utils/src/fixture.rs +++ b/crates/test_utils/src/fixture.rs @@ -2,7 +2,7 @@ //! rust-analyzer database from a single string.
use rustc_hash::FxHashMap; -use stdx::{lines_with_ends, split_delim, trim_indent}; +use stdx::{lines_with_ends, split_once, trim_indent}; #[derive(Debug, Eq, PartialEq)] pub struct Fixture { @@ -71,14 +71,14 @@ impl Fixture { let mut cfg_key_values = Vec::new(); let mut env = FxHashMap::default(); for component in components[1..].iter() { - let (key, value) = split_delim(component, ':').unwrap(); + let (key, value) = split_once(component, ':').unwrap(); match key { "crate" => krate = Some(value.to_string()), "deps" => deps = value.split(',').map(|it| it.to_string()).collect(), "edition" => edition = Some(value.to_string()), "cfg" => { for entry in value.split(',') { - match split_delim(entry, '=') { + match split_once(entry, '=') { Some((k, v)) => cfg_key_values.push((k.to_string(), v.to_string())), None => cfg_atoms.push(entry.to_string()), } @@ -86,7 +86,7 @@ impl Fixture { } "env" => { for key in value.split(',') { - if let Some((k, v)) = split_delim(key, '=') { + if let Some((k, v)) = split_once(key, '=') { env.insert(k.into(), v.into()); } } -- cgit v1.2.3 From cd9f863b017a6d568b5ef45f1ce200e3148cfd03 Mon Sep 17 00:00:00 2001 From: Aleksey Kladov Date: Thu, 30 Jul 2020 22:38:24 +0200 Subject: Use CmdArgs pattern for bench & analysis stats --- crates/rust-analyzer/src/bin/args.rs | 33 +- crates/rust-analyzer/src/bin/main.rs | 32 +- crates/rust-analyzer/src/cli.rs | 4 +- crates/rust-analyzer/src/cli/analysis_bench.rs | 116 ++++--- crates/rust-analyzer/src/cli/analysis_stats.rs | 451 +++++++++++++------------ 5 files changed, 305 insertions(+), 331 deletions(-) (limited to 'crates') diff --git a/crates/rust-analyzer/src/bin/args.rs b/crates/rust-analyzer/src/bin/args.rs index 741a2a951..f16e35d86 100644 --- a/crates/rust-analyzer/src/bin/args.rs +++ b/crates/rust-analyzer/src/bin/args.rs @@ -8,7 +8,7 @@ use std::{env, fmt::Write, path::PathBuf}; use anyhow::{bail, Result}; use pico_args::Arguments; use ra_ssr::{SsrPattern, SsrRule}; -use rust_analyzer::cli::{BenchWhat, Position, Verbosity}; +use rust_analyzer::cli::{AnalysisStatsCmd, BenchCmd, BenchWhat, Position, Verbosity}; use vfs::AbsPathBuf; pub(crate) struct Args { @@ -24,23 +24,8 @@ pub(crate) enum Command { Highlight { rainbow: bool, }, - Stats { - randomize: bool, - parallel: bool, - memory_usage: bool, - only: Option<String>, - with_deps: bool, - path: PathBuf, - load_output_dirs: bool, - with_proc_macro: bool, - }, - Bench { - memory_usage: bool, - path: PathBuf, - what: BenchWhat, - load_output_dirs: bool, - with_proc_macro: bool, - }, + AnalysisStats(AnalysisStatsCmd), + Bench(BenchCmd), Diagnostics { path: PathBuf, load_output_dirs: bool, @@ -199,7 +184,7 @@ ARGS: trailing.pop().unwrap().into() }; - Command::Stats { + Command::AnalysisStats(AnalysisStatsCmd { randomize, parallel, memory_usage, @@ -208,7 +193,7 @@ ARGS: path, load_output_dirs, with_proc_macro, - } + }) } "analysis-bench" => { if matches.contains(["-h", "--help"]) { @@ -256,7 +241,13 @@ ARGS: let memory_usage = matches.contains("--memory-usage"); let load_output_dirs = matches.contains("--load-output-dirs"); let with_proc_macro = matches.contains("--with-proc-macro"); - Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } + Command::Bench(BenchCmd { + memory_usage, + path, + what, + load_output_dirs, + with_proc_macro, + }) } "diagnostics" => { if matches.contains(["-h", "--help"]) { diff --git a/crates/rust-analyzer/src/bin/main.rs b/crates/rust-analyzer/src/bin/main.rs index a473c9165..ff8234495 100644 ---
a/crates/rust-analyzer/src/bin/main.rs +++ b/crates/rust-analyzer/src/bin/main.rs @@ -33,36 +33,8 @@ fn main() -> Result<()> { args::Command::Parse { no_dump } => cli::parse(no_dump)?, args::Command::Symbols => cli::symbols()?, args::Command::Highlight { rainbow } => cli::highlight(rainbow)?, - args::Command::Stats { - randomize, - parallel, - memory_usage, - only, - with_deps, - path, - load_output_dirs, - with_proc_macro, - } => cli::analysis_stats( - args.verbosity, - memory_usage, - path.as_ref(), - only.as_ref().map(String::as_ref), - with_deps, - randomize, - parallel, - load_output_dirs, - with_proc_macro, - )?, - args::Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } => { - cli::analysis_bench( - args.verbosity, - path.as_ref(), - what, - memory_usage, - load_output_dirs, - with_proc_macro, - )? - } + args::Command::AnalysisStats(cmd) => cmd.run(args.verbosity)?, + args::Command::Bench(cmd) => cmd.run(args.verbosity)?, args::Command::Diagnostics { path, load_output_dirs, with_proc_macro, all } => { cli::diagnostics(path.as_ref(), load_output_dirs, with_proc_macro, all)? } diff --git a/crates/rust-analyzer/src/cli.rs b/crates/rust-analyzer/src/cli.rs index a9b9c8923..1034d11bd 100644 --- a/crates/rust-analyzer/src/cli.rs +++ b/crates/rust-analyzer/src/cli.rs @@ -14,8 +14,8 @@ use ra_ide::Analysis; use ra_prof::profile; use ra_syntax::{AstNode, SourceFile}; -pub use analysis_bench::{analysis_bench, BenchWhat, Position}; -pub use analysis_stats::analysis_stats; +pub use analysis_bench::{BenchCmd, BenchWhat, Position}; +pub use analysis_stats::AnalysisStatsCmd; pub use diagnostics::diagnostics; pub use load_cargo::load_cargo; pub use ssr::{apply_ssr_rules, search_for_patterns}; diff --git a/crates/rust-analyzer/src/cli/analysis_bench.rs b/crates/rust-analyzer/src/cli/analysis_bench.rs index 13a106638..c54ee5f4d 100644 --- a/crates/rust-analyzer/src/cli/analysis_bench.rs +++ b/crates/rust-analyzer/src/cli/analysis_bench.rs @@ -1,6 +1,6 @@ //! Benchmark operations like highlighting or goto definition. -use std::{env, path::Path, str::FromStr, sync::Arc, time::Instant}; +use std::{env, path::PathBuf, str::FromStr, sync::Arc, time::Instant}; use anyhow::{bail, format_err, Result}; use ra_db::{ @@ -15,6 +15,14 @@ use crate::{ print_memory_usage, }; +pub struct BenchCmd { + pub path: PathBuf, + pub what: BenchWhat, + pub memory_usage: bool, + pub load_output_dirs: bool, + pub with_proc_macro: bool, +} + pub enum BenchWhat { Highlight { path: AbsPathBuf }, Complete(Position), @@ -42,72 +50,68 @@ impl FromStr for Position { } } -pub fn analysis_bench( - verbosity: Verbosity, - path: &Path, - what: BenchWhat, - memory_usage: bool, - load_output_dirs: bool, - with_proc_macro: bool, -) -> Result<()> { - ra_prof::init(); - - let start = Instant::now(); - eprint!("loading: "); - let (mut host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; - eprintln!("{:?}\n", start.elapsed()); - - let file_id = { - let path = match &what { - BenchWhat::Highlight { path } => path, - BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, - }; - let path = path.clone().into(); - vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? - }; - - match &what { - BenchWhat::Highlight { .. 
} => { - let res = do_work(&mut host, file_id, |analysis| { - analysis.diagnostics(file_id, true).unwrap(); - analysis.highlight_as_html(file_id, false).unwrap() - }); - if verbosity.is_verbose() { - println!("\n{}", res); - } - } - BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { - let is_completion = matches!(what, BenchWhat::Complete(..)); +impl BenchCmd { + pub fn run(self, verbosity: Verbosity) -> Result<()> { + ra_prof::init(); + + let start = Instant::now(); + eprint!("loading: "); + let (mut host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; + eprintln!("{:?}\n", start.elapsed()); - let offset = host - .analysis() - .file_line_index(file_id)? - .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); - let file_position = FilePosition { file_id, offset }; + let file_id = { + let path = match &self.what { + BenchWhat::Highlight { path } => path, + BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, + }; + let path = path.clone().into(); + vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? + }; - if is_completion { - let options = CompletionConfig::default(); + match &self.what { + BenchWhat::Highlight { .. } => { let res = do_work(&mut host, file_id, |analysis| { - analysis.completions(&options, file_position) + analysis.diagnostics(file_id, true).unwrap(); + analysis.highlight_as_html(file_id, false).unwrap() }); if verbosity.is_verbose() { - println!("\n{:#?}", res); + println!("\n{}", res); } - } else { - let res = - do_work(&mut host, file_id, |analysis| analysis.goto_definition(file_position)); - if verbosity.is_verbose() { - println!("\n{:#?}", res); + } + BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { + let is_completion = matches!(self.what, BenchWhat::Complete(..)); + + let offset = host + .analysis() + .file_line_index(file_id)? + .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); + let file_position = FilePosition { file_id, offset }; + + if is_completion { + let options = CompletionConfig::default(); + let res = do_work(&mut host, file_id, |analysis| { + analysis.completions(&options, file_position) + }); + if verbosity.is_verbose() { + println!("\n{:#?}", res); + } + } else { + let res = do_work(&mut host, file_id, |analysis| { + analysis.goto_definition(file_position) + }); + if verbosity.is_verbose() { + println!("\n{:#?}", res); + } } } } - } - if memory_usage { - print_memory_usage(host, vfs); - } + if self.memory_usage { + print_memory_usage(host, vfs); + } - Ok(()) + Ok(()) + } } fn do_work<F: Fn(&Analysis) -> T, T>(host: &mut AnalysisHost, file_id: FileId, work: F) -> T { diff --git a/crates/rust-analyzer/src/cli/analysis_stats.rs b/crates/rust-analyzer/src/cli/analysis_stats.rs index a270eb481..721d41a58 100644 --- a/crates/rust-analyzer/src/cli/analysis_stats.rs +++ b/crates/rust-analyzer/src/cli/analysis_stats.rs @@ -2,7 +2,7 @@ //! errors.
use std::{ - path::Path, + path::PathBuf, time::{SystemTime, UNIX_EPOCH}, }; @@ -39,273 +39,280 @@ impl Clone for Snap> { } } -pub fn analysis_stats( - verbosity: Verbosity, - memory_usage: bool, - path: &Path, - only: Option<&str>, - with_deps: bool, - randomize: bool, - parallel: bool, - load_output_dirs: bool, - with_proc_macro: bool, -) -> Result<()> { - let mut rng = { - let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; - Rand32::new(seed) - }; +pub struct AnalysisStatsCmd { + pub randomize: bool, + pub parallel: bool, + pub memory_usage: bool, + pub only: Option<String>, + pub with_deps: bool, + pub path: PathBuf, + pub load_output_dirs: bool, + pub with_proc_macro: bool, +} - let mut db_load_sw = StopWatch::start().memory(memory_usage); - let (host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; - let db = host.raw_database(); - eprintln!("Database loaded {}", db_load_sw.elapsed()); +impl AnalysisStatsCmd { + pub fn run(self, verbosity: Verbosity) -> Result<()> { + let mut rng = { + let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; + Rand32::new(seed) + }; - let mut analysis_sw = StopWatch::start().memory(memory_usage); - let mut num_crates = 0; - let mut visited_modules = FxHashSet::default(); - let mut visit_queue = Vec::new(); + let mut db_load_sw = self.stop_watch(); + let (host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; + let db = host.raw_database(); + eprintln!("Database loaded {}", db_load_sw.elapsed()); - let mut krates = Crate::all(db); - if randomize { - shuffle(&mut rng, &mut krates); - } - for krate in krates { - let module = krate.root_module(db).expect("crate without root module"); - let file_id = module.definition_source(db).file_id; - let file_id = file_id.original_file(db); - let source_root = db.file_source_root(file_id); - let source_root = db.source_root(source_root); - if !source_root.is_library || with_deps { - num_crates += 1; - visit_queue.push(module); - } - } + let mut analysis_sw = self.stop_watch(); + let mut num_crates = 0; + let mut visited_modules = FxHashSet::default(); + let mut visit_queue = Vec::new(); - if randomize { - shuffle(&mut rng, &mut visit_queue); - } + let mut krates = Crate::all(db); + if self.randomize { + shuffle(&mut rng, &mut krates); + } + for krate in krates { + let module = krate.root_module(db).expect("crate without root module"); + let file_id = module.definition_source(db).file_id; + let file_id = file_id.original_file(db); + let source_root = db.file_source_root(file_id); + let source_root = db.source_root(source_root); + if !source_root.is_library || self.with_deps { + num_crates += 1; + visit_queue.push(module); + } + } - eprintln!("Crates in this dir: {}", num_crates); - let mut num_decls = 0; - let mut funcs = Vec::new(); - while let Some(module) = visit_queue.pop() { - if visited_modules.insert(module) { - visit_queue.extend(module.children(db)); + if self.randomize { + shuffle(&mut rng, &mut visit_queue); + } - for decl in module.declarations(db) { - num_decls += 1; - if let ModuleDef::Function(f) = decl { - funcs.push(f); - } - } + eprintln!("Crates in this dir: {}", num_crates); + let mut num_decls = 0; + let mut funcs = Vec::new(); + while let Some(module) = visit_queue.pop() { + if visited_modules.insert(module) { + visit_queue.extend(module.children(db)); - for impl_def in module.impl_defs(db) { - for item in impl_def.items(db) { + for decl in module.declarations(db) { num_decls += 1; - if let
AssocItem::Function(f) = item { + if let ModuleDef::Function(f) = decl { funcs.push(f); } } + + for impl_def in module.impl_defs(db) { + for item in impl_def.items(db) { + num_decls += 1; + if let AssocItem::Function(f) = item { + funcs.push(f); + } + } + } } } - } - eprintln!("Total modules found: {}", visited_modules.len()); - eprintln!("Total declarations: {}", num_decls); - eprintln!("Total functions: {}", funcs.len()); - eprintln!("Item Collection: {}", analysis_sw.elapsed()); - - if randomize { - shuffle(&mut rng, &mut funcs); - } + eprintln!("Total modules found: {}", visited_modules.len()); + eprintln!("Total declarations: {}", num_decls); + eprintln!("Total functions: {}", funcs.len()); + eprintln!("Item Collection: {}", analysis_sw.elapsed()); - let mut bar = match verbosity { - Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), - _ if parallel => ProgressReport::hidden(), - _ => ProgressReport::new(funcs.len() as u64), - }; + if self.randomize { + shuffle(&mut rng, &mut funcs); + } - if parallel { - let mut inference_sw = StopWatch::start().memory(memory_usage); - let snap = Snap(db.snapshot()); - funcs - .par_iter() - .map_with(snap, |snap, &f| { - let f_id = FunctionId::from(f); - snap.0.body(f_id.into()); - snap.0.infer(f_id.into()); - }) - .count(); - eprintln!("Parallel Inference: {}", inference_sw.elapsed()); - } + let mut bar = match verbosity { + Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), + _ if self.parallel => ProgressReport::hidden(), + _ => ProgressReport::new(funcs.len() as u64), + }; - let mut inference_sw = StopWatch::start().memory(memory_usage); - bar.tick(); - let mut num_exprs = 0; - let mut num_exprs_unknown = 0; - let mut num_exprs_partially_unknown = 0; - let mut num_type_mismatches = 0; - for f in funcs { - let name = f.name(db); - let full_name = f - .module(db) - .path_to_root(db) - .into_iter() - .rev() - .filter_map(|it| it.name(db)) - .chain(Some(f.name(db))) - .join("::"); - if let Some(only_name) = only { - if name.to_string() != only_name && full_name != only_name { - continue; - } - } - let mut msg = format!("processing: {}", full_name); - if verbosity.is_verbose() { - let src = f.source(db); - let original_file = src.file_id.original_file(db); - let path = vfs.file_path(original_file); - let syntax_range = src.value.syntax().text_range(); - format_to!(msg, " ({} {:?})", path, syntax_range); + if self.parallel { + let mut inference_sw = self.stop_watch(); + let snap = Snap(db.snapshot()); + funcs + .par_iter() + .map_with(snap, |snap, &f| { + let f_id = FunctionId::from(f); + snap.0.body(f_id.into()); + snap.0.infer(f_id.into()); + }) + .count(); + eprintln!("Parallel Inference: {}", inference_sw.elapsed()); } - if verbosity.is_spammy() { - bar.println(msg.to_string()); - } - bar.set_message(&msg); - let f_id = FunctionId::from(f); - let body = db.body(f_id.into()); - let inference_result = db.infer(f_id.into()); - let (previous_exprs, previous_unknown, previous_partially_unknown) = - (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); - for (expr_id, _) in body.exprs.iter() { - let ty = &inference_result[expr_id]; - num_exprs += 1; - if let Ty::Unknown = ty { - num_exprs_unknown += 1; - } else { - let mut is_partially_unknown = false; - ty.walk(&mut |ty| { - if let Ty::Unknown = ty { - is_partially_unknown = true; - } - }); - if is_partially_unknown { - num_exprs_partially_unknown += 1; + + let mut inference_sw = self.stop_watch(); + bar.tick(); + let mut num_exprs = 0; + let mut num_exprs_unknown = 
0; + let mut num_exprs_partially_unknown = 0; + let mut num_type_mismatches = 0; + for f in funcs { + let name = f.name(db); + let full_name = f + .module(db) + .path_to_root(db) + .into_iter() + .rev() + .filter_map(|it| it.name(db)) + .chain(Some(f.name(db))) + .join("::"); + if let Some(only_name) = self.only.as_deref() { + if name.to_string() != only_name && full_name != only_name { + continue; } } - if only.is_some() && verbosity.is_spammy() { - // in super-verbose mode for just one function, we print every single expression - let (_, sm) = db.body_with_source_map(f_id.into()); - let src = sm.expr_syntax(expr_id); - if let Ok(src) = src { - let node = { - let root = db.parse_or_expand(src.file_id).unwrap(); - src.value.to_node(&root) - }; - let original_file = src.file_id.original_file(db); - let line_index = host.analysis().file_line_index(original_file).unwrap(); - let text_range = node.syntax().text_range(); - let (start, end) = ( - line_index.line_col(text_range.start()), - line_index.line_col(text_range.end()), - ); - bar.println(format!( - "{}:{}-{}:{}: {}", - start.line + 1, - start.col_utf16, - end.line + 1, - end.col_utf16, - ty.display(db) - )); + let mut msg = format!("processing: {}", full_name); + if verbosity.is_verbose() { + let src = f.source(db); + let original_file = src.file_id.original_file(db); + let path = vfs.file_path(original_file); + let syntax_range = src.value.syntax().text_range(); + format_to!(msg, " ({} {:?})", path, syntax_range); + } + if verbosity.is_spammy() { + bar.println(msg.to_string()); + } + bar.set_message(&msg); + let f_id = FunctionId::from(f); + let body = db.body(f_id.into()); + let inference_result = db.infer(f_id.into()); + let (previous_exprs, previous_unknown, previous_partially_unknown) = + (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); + for (expr_id, _) in body.exprs.iter() { + let ty = &inference_result[expr_id]; + num_exprs += 1; + if let Ty::Unknown = ty { + num_exprs_unknown += 1; } else { - bar.println(format!("unknown location: {}", ty.display(db))); + let mut is_partially_unknown = false; + ty.walk(&mut |ty| { + if let Ty::Unknown = ty { + is_partially_unknown = true; + } + }); + if is_partially_unknown { + num_exprs_partially_unknown += 1; + } } - } - if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { - num_type_mismatches += 1; - if verbosity.is_verbose() { + if self.only.is_some() && verbosity.is_spammy() { + // in super-verbose mode for just one function, we print every single expression let (_, sm) = db.body_with_source_map(f_id.into()); let src = sm.expr_syntax(expr_id); if let Ok(src) = src { - // FIXME: it might be nice to have a function (on Analysis?) 
that goes from Source -> (LineCol, LineCol) directly - // But also, we should just turn the type mismatches into diagnostics and provide these - let root = db.parse_or_expand(src.file_id).unwrap(); - let node = src.map(|e| e.to_node(&root).syntax().clone()); - let original_range = original_range(db, node.as_ref()); - let path = vfs.file_path(original_range.file_id); - let line_index = - host.analysis().file_line_index(original_range.file_id).unwrap(); - let text_range = original_range.range; + let node = { + let root = db.parse_or_expand(src.file_id).unwrap(); + src.value.to_node(&root) + }; + let original_file = src.file_id.original_file(db); + let line_index = host.analysis().file_line_index(original_file).unwrap(); + let text_range = node.syntax().text_range(); let (start, end) = ( line_index.line_col(text_range.start()), line_index.line_col(text_range.end()), ); bar.println(format!( - "{} {}:{}-{}:{}: Expected {}, got {}", - path, + "{}:{}-{}:{}: {}", start.line + 1, start.col_utf16, end.line + 1, end.col_utf16, - mismatch.expected.display(db), - mismatch.actual.display(db) + ty.display(db) )); } else { - bar.println(format!( - "{}: Expected {}, got {}", - name, - mismatch.expected.display(db), - mismatch.actual.display(db) - )); + bar.println(format!("unknown location: {}", ty.display(db))); + } + } + if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { + num_type_mismatches += 1; + if verbosity.is_verbose() { + let (_, sm) = db.body_with_source_map(f_id.into()); + let src = sm.expr_syntax(expr_id); + if let Ok(src) = src { + // FIXME: it might be nice to have a function (on Analysis?) that goes from Source -> (LineCol, LineCol) directly + // But also, we should just turn the type mismatches into diagnostics and provide these + let root = db.parse_or_expand(src.file_id).unwrap(); + let node = src.map(|e| e.to_node(&root).syntax().clone()); + let original_range = original_range(db, node.as_ref()); + let path = vfs.file_path(original_range.file_id); + let line_index = + host.analysis().file_line_index(original_range.file_id).unwrap(); + let text_range = original_range.range; + let (start, end) = ( + line_index.line_col(text_range.start()), + line_index.line_col(text_range.end()), + ); + bar.println(format!( + "{} {}:{}-{}:{}: Expected {}, got {}", + path, + start.line + 1, + start.col_utf16, + end.line + 1, + end.col_utf16, + mismatch.expected.display(db), + mismatch.actual.display(db) + )); + } else { + bar.println(format!( + "{}: Expected {}, got {}", + name, + mismatch.expected.display(db), + mismatch.actual.display(db) + )); + } } } } + if verbosity.is_spammy() { + bar.println(format!( + "In {}: {} exprs, {} unknown, {} partial", + full_name, + num_exprs - previous_exprs, + num_exprs_unknown - previous_unknown, + num_exprs_partially_unknown - previous_partially_unknown + )); + } + bar.inc(1); } - if verbosity.is_spammy() { - bar.println(format!( - "In {}: {} exprs, {} unknown, {} partial", - full_name, - num_exprs - previous_exprs, - num_exprs_unknown - previous_unknown, - num_exprs_partially_unknown - previous_partially_unknown - )); - } - bar.inc(1); - } - bar.finish_and_clear(); - eprintln!("Total expressions: {}", num_exprs); - eprintln!( - "Expressions of unknown type: {} ({}%)", - num_exprs_unknown, - if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } - ); - report_metric("unknown type", num_exprs_unknown, "#"); + bar.finish_and_clear(); + eprintln!("Total expressions: {}", num_exprs); + eprintln!( + "Expressions of unknown type: {} 
({}%)", + num_exprs_unknown, + if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } + ); + report_metric("unknown type", num_exprs_unknown, "#"); - eprintln!( - "Expressions of partially unknown type: {} ({}%)", - num_exprs_partially_unknown, - if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } - ); + eprintln!( + "Expressions of partially unknown type: {} ({}%)", + num_exprs_partially_unknown, + if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } + ); - eprintln!("Type mismatches: {}", num_type_mismatches); - report_metric("type mismatches", num_type_mismatches, "#"); + eprintln!("Type mismatches: {}", num_type_mismatches); + report_metric("type mismatches", num_type_mismatches, "#"); - eprintln!("Inference: {}", inference_sw.elapsed()); + eprintln!("Inference: {}", inference_sw.elapsed()); - let total_span = analysis_sw.elapsed(); - eprintln!("Total: {}", total_span); - report_metric("total time", total_span.time.as_millis() as u64, "ms"); - if let Some(instructions) = total_span.instructions { - report_metric("total instructions", instructions, "#instr"); - } - if let Some(memory) = total_span.memory { - report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); - } + let total_span = analysis_sw.elapsed(); + eprintln!("Total: {}", total_span); + report_metric("total time", total_span.time.as_millis() as u64, "ms"); + if let Some(instructions) = total_span.instructions { + report_metric("total instructions", instructions, "#instr"); + } + if let Some(memory) = total_span.memory { + report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); + } - if memory_usage { - print_memory_usage(host, vfs); + if self.memory_usage { + print_memory_usage(host, vfs); + } + + Ok(()) } - Ok(()) + fn stop_watch(&self) -> StopWatch { + StopWatch::start().memory(self.memory_usage) + } } fn shuffle(rng: &mut Rand32, slice: &mut [T]) { -- cgit v1.2.3