Diffstat (limited to 'crates')
 crates/rust-analyzer/src/bin/args.rs            |  33
 crates/rust-analyzer/src/bin/main.rs            |  32
 crates/rust-analyzer/src/cli.rs                 |   4
 crates/rust-analyzer/src/cli/analysis_bench.rs  | 116
 crates/rust-analyzer/src/cli/analysis_stats.rs  | 451
 5 files changed, 305 insertions, 331 deletions
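
The commit below replaces the free-function CLI entry points (cli::analysis_stats, cli::analysis_bench) with two command structs, AnalysisStatsCmd and BenchCmd, each carrying its options as public fields and exposing a run(self, verbosity) method. A minimal caller sketch, assuming only the field set and run signature defined in the hunks below (the concrete values are placeholders, not taken from the commit):

    use std::path::PathBuf;

    use anyhow::Result;
    use rust_analyzer::cli::{AnalysisStatsCmd, Verbosity};

    // Placeholder option values; in the real binary these come from the parsed CLI flags.
    fn run_stats(verbosity: Verbosity) -> Result<()> {
        let cmd = AnalysisStatsCmd {
            randomize: false,
            parallel: false,
            memory_usage: true,
            only: None,
            with_deps: false,
            path: PathBuf::from("."),
            load_output_dirs: true,
            with_proc_macro: true,
        };
        cmd.run(verbosity)
    }
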
diff --git a/crates/rust-analyzer/src/bin/args.rs b/crates/rust-analyzer/src/bin/args.rs
index 741a2a951..f16e35d86 100644
--- a/crates/rust-analyzer/src/bin/args.rs
+++ b/crates/rust-analyzer/src/bin/args.rs
@@ -8,7 +8,7 @@ use std::{env, fmt::Write, path::PathBuf};
8 | use anyhow::{bail, Result}; | 8 | use anyhow::{bail, Result}; |
9 | use pico_args::Arguments; | 9 | use pico_args::Arguments; |
10 | use ra_ssr::{SsrPattern, SsrRule}; | 10 | use ra_ssr::{SsrPattern, SsrRule}; |
11 | use rust_analyzer::cli::{BenchWhat, Position, Verbosity}; | 11 | use rust_analyzer::cli::{AnalysisStatsCmd, BenchCmd, BenchWhat, Position, Verbosity}; |
12 | use vfs::AbsPathBuf; | 12 | use vfs::AbsPathBuf; |
13 | 13 | ||
14 | pub(crate) struct Args { | 14 | pub(crate) struct Args { |
@@ -24,23 +24,8 @@ pub(crate) enum Command {
24 | Highlight { | 24 | Highlight { |
25 | rainbow: bool, | 25 | rainbow: bool, |
26 | }, | 26 | }, |
27 | Stats { | 27 | AnalysisStats(AnalysisStatsCmd), |
28 | randomize: bool, | 28 | Bench(BenchCmd), |
29 | parallel: bool, | ||
30 | memory_usage: bool, | ||
31 | only: Option<String>, | ||
32 | with_deps: bool, | ||
33 | path: PathBuf, | ||
34 | load_output_dirs: bool, | ||
35 | with_proc_macro: bool, | ||
36 | }, | ||
37 | Bench { | ||
38 | memory_usage: bool, | ||
39 | path: PathBuf, | ||
40 | what: BenchWhat, | ||
41 | load_output_dirs: bool, | ||
42 | with_proc_macro: bool, | ||
43 | }, | ||
44 | Diagnostics { | 29 | Diagnostics { |
45 | path: PathBuf, | 30 | path: PathBuf, |
46 | load_output_dirs: bool, | 31 | load_output_dirs: bool, |
@@ -199,7 +184,7 @@ ARGS:
199 | trailing.pop().unwrap().into() | 184 | trailing.pop().unwrap().into() |
200 | }; | 185 | }; |
201 | 186 | ||
202 | Command::Stats { | 187 | Command::AnalysisStats(AnalysisStatsCmd { |
203 | randomize, | 188 | randomize, |
204 | parallel, | 189 | parallel, |
205 | memory_usage, | 190 | memory_usage, |
@@ -208,7 +193,7 @@ ARGS:
208 | path, | 193 | path, |
209 | load_output_dirs, | 194 | load_output_dirs, |
210 | with_proc_macro, | 195 | with_proc_macro, |
211 | } | 196 | }) |
212 | } | 197 | } |
213 | "analysis-bench" => { | 198 | "analysis-bench" => { |
214 | if matches.contains(["-h", "--help"]) { | 199 | if matches.contains(["-h", "--help"]) { |
@@ -256,7 +241,13 @@ ARGS:
256 | let memory_usage = matches.contains("--memory-usage"); | 241 | let memory_usage = matches.contains("--memory-usage"); |
257 | let load_output_dirs = matches.contains("--load-output-dirs"); | 242 | let load_output_dirs = matches.contains("--load-output-dirs"); |
258 | let with_proc_macro = matches.contains("--with-proc-macro"); | 243 | let with_proc_macro = matches.contains("--with-proc-macro"); |
259 | Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } | 244 | Command::Bench(BenchCmd { |
245 | memory_usage, | ||
246 | path, | ||
247 | what, | ||
248 | load_output_dirs, | ||
249 | with_proc_macro, | ||
250 | }) | ||
260 | } | 251 | } |
261 | "diagnostics" => { | 252 | "diagnostics" => { |
262 | if matches.contains(["-h", "--help"]) { | 253 | if matches.contains(["-h", "--help"]) { |
diff --git a/crates/rust-analyzer/src/bin/main.rs b/crates/rust-analyzer/src/bin/main.rs
index a473c9165..ff8234495 100644
--- a/crates/rust-analyzer/src/bin/main.rs
+++ b/crates/rust-analyzer/src/bin/main.rs
@@ -33,36 +33,8 @@ fn main() -> Result<()> {
33 | args::Command::Parse { no_dump } => cli::parse(no_dump)?, | 33 | args::Command::Parse { no_dump } => cli::parse(no_dump)?, |
34 | args::Command::Symbols => cli::symbols()?, | 34 | args::Command::Symbols => cli::symbols()?, |
35 | args::Command::Highlight { rainbow } => cli::highlight(rainbow)?, | 35 | args::Command::Highlight { rainbow } => cli::highlight(rainbow)?, |
36 | args::Command::Stats { | 36 | args::Command::AnalysisStats(cmd) => cmd.run(args.verbosity)?, |
37 | randomize, | 37 | args::Command::Bench(cmd) => cmd.run(args.verbosity)?, |
38 | parallel, | ||
39 | memory_usage, | ||
40 | only, | ||
41 | with_deps, | ||
42 | path, | ||
43 | load_output_dirs, | ||
44 | with_proc_macro, | ||
45 | } => cli::analysis_stats( | ||
46 | args.verbosity, | ||
47 | memory_usage, | ||
48 | path.as_ref(), | ||
49 | only.as_ref().map(String::as_ref), | ||
50 | with_deps, | ||
51 | randomize, | ||
52 | parallel, | ||
53 | load_output_dirs, | ||
54 | with_proc_macro, | ||
55 | )?, | ||
56 | args::Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } => { | ||
57 | cli::analysis_bench( | ||
58 | args.verbosity, | ||
59 | path.as_ref(), | ||
60 | what, | ||
61 | memory_usage, | ||
62 | load_output_dirs, | ||
63 | with_proc_macro, | ||
64 | )? | ||
65 | } | ||
66 | args::Command::Diagnostics { path, load_output_dirs, with_proc_macro, all } => { | 38 | args::Command::Diagnostics { path, load_output_dirs, with_proc_macro, all } => { |
67 | cli::diagnostics(path.as_ref(), load_output_dirs, with_proc_macro, all)? | 39 | cli::diagnostics(path.as_ref(), load_output_dirs, with_proc_macro, all)? |
68 | } | 40 | } |
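
With the options bundled into structs, the dispatch in main.rs shrinks to cmd.run(args.verbosity)? per command. A standalone sketch of the pattern the commit applies (illustrative only, with hypothetical stand-in types rather than rust-analyzer code): a long positional argument list becomes named fields on a command struct, and the call site only threads through the verbosity.

    use std::path::PathBuf;

    // Hypothetical stand-in type, just to show the shape of the refactor.
    struct ExampleCmd {
        memory_usage: bool,
        path: PathBuf,
    }

    impl ExampleCmd {
        fn run(self, verbose: bool) -> Result<(), String> {
            if verbose {
                eprintln!("analysing {:?} (memory_usage = {})", self.path, self.memory_usage);
            }
            Ok(())
        }
    }

    fn main() -> Result<(), String> {
        // Instead of example_cmd(verbose, memory_usage, &path), the options travel on the struct.
        ExampleCmd { memory_usage: true, path: PathBuf::from("crates") }.run(true)
    }
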
diff --git a/crates/rust-analyzer/src/cli.rs b/crates/rust-analyzer/src/cli.rs
index a9b9c8923..1034d11bd 100644
--- a/crates/rust-analyzer/src/cli.rs
+++ b/crates/rust-analyzer/src/cli.rs
@@ -14,8 +14,8 @@ use ra_ide::Analysis;
14 | use ra_prof::profile; | 14 | use ra_prof::profile; |
15 | use ra_syntax::{AstNode, SourceFile}; | 15 | use ra_syntax::{AstNode, SourceFile}; |
16 | 16 | ||
17 | pub use analysis_bench::{analysis_bench, BenchWhat, Position}; | 17 | pub use analysis_bench::{BenchCmd, BenchWhat, Position}; |
18 | pub use analysis_stats::analysis_stats; | 18 | pub use analysis_stats::AnalysisStatsCmd; |
19 | pub use diagnostics::diagnostics; | 19 | pub use diagnostics::diagnostics; |
20 | pub use load_cargo::load_cargo; | 20 | pub use load_cargo::load_cargo; |
21 | pub use ssr::{apply_ssr_rules, search_for_patterns}; | 21 | pub use ssr::{apply_ssr_rules, search_for_patterns}; |
diff --git a/crates/rust-analyzer/src/cli/analysis_bench.rs b/crates/rust-analyzer/src/cli/analysis_bench.rs
index 13a106638..c54ee5f4d 100644
--- a/crates/rust-analyzer/src/cli/analysis_bench.rs
+++ b/crates/rust-analyzer/src/cli/analysis_bench.rs
@@ -1,6 +1,6 @@
1 | //! Benchmark operations like highlighting or goto definition. | 1 | //! Benchmark operations like highlighting or goto definition. |
2 | 2 | ||
3 | use std::{env, path::Path, str::FromStr, sync::Arc, time::Instant}; | 3 | use std::{env, path::PathBuf, str::FromStr, sync::Arc, time::Instant}; |
4 | 4 | ||
5 | use anyhow::{bail, format_err, Result}; | 5 | use anyhow::{bail, format_err, Result}; |
6 | use ra_db::{ | 6 | use ra_db::{ |
@@ -15,6 +15,14 @@ use crate::{
15 | print_memory_usage, | 15 | print_memory_usage, |
16 | }; | 16 | }; |
17 | 17 | ||
18 | pub struct BenchCmd { | ||
19 | pub path: PathBuf, | ||
20 | pub what: BenchWhat, | ||
21 | pub memory_usage: bool, | ||
22 | pub load_output_dirs: bool, | ||
23 | pub with_proc_macro: bool, | ||
24 | } | ||
25 | |||
18 | pub enum BenchWhat { | 26 | pub enum BenchWhat { |
19 | Highlight { path: AbsPathBuf }, | 27 | Highlight { path: AbsPathBuf }, |
20 | Complete(Position), | 28 | Complete(Position), |
@@ -42,72 +50,68 @@ impl FromStr for Position {
42 | } | 50 | } |
43 | } | 51 | } |
44 | 52 | ||
45 | pub fn analysis_bench( | 53 | impl BenchCmd { |
46 | verbosity: Verbosity, | 54 | pub fn run(self, verbosity: Verbosity) -> Result<()> { |
47 | path: &Path, | 55 | ra_prof::init(); |
48 | what: BenchWhat, | 56 | |
49 | memory_usage: bool, | 57 | let start = Instant::now(); |
50 | load_output_dirs: bool, | 58 | eprint!("loading: "); |
51 | with_proc_macro: bool, | 59 | let (mut host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; |
52 | ) -> Result<()> { | 60 | eprintln!("{:?}\n", start.elapsed()); |
53 | ra_prof::init(); | ||
54 | |||
55 | let start = Instant::now(); | ||
56 | eprint!("loading: "); | ||
57 | let (mut host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; | ||
58 | eprintln!("{:?}\n", start.elapsed()); | ||
59 | |||
60 | let file_id = { | ||
61 | let path = match &what { | ||
62 | BenchWhat::Highlight { path } => path, | ||
63 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, | ||
64 | }; | ||
65 | let path = path.clone().into(); | ||
66 | vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? | ||
67 | }; | ||
68 | |||
69 | match &what { | ||
70 | BenchWhat::Highlight { .. } => { | ||
71 | let res = do_work(&mut host, file_id, |analysis| { | ||
72 | analysis.diagnostics(file_id, true).unwrap(); | ||
73 | analysis.highlight_as_html(file_id, false).unwrap() | ||
74 | }); | ||
75 | if verbosity.is_verbose() { | ||
76 | println!("\n{}", res); | ||
77 | } | ||
78 | } | ||
79 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { | ||
80 | let is_completion = matches!(what, BenchWhat::Complete(..)); | ||
81 | 61 | ||
82 | let offset = host | 62 | let file_id = { |
83 | .analysis() | 63 | let path = match &self.what { |
84 | .file_line_index(file_id)? | 64 | BenchWhat::Highlight { path } => path, |
85 | .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); | 65 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, |
86 | let file_position = FilePosition { file_id, offset }; | 66 | }; |
67 | let path = path.clone().into(); | ||
68 | vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? | ||
69 | }; | ||
87 | 70 | ||
88 | if is_completion { | 71 | match &self.what { |
89 | let options = CompletionConfig::default(); | 72 | BenchWhat::Highlight { .. } => { |
90 | let res = do_work(&mut host, file_id, |analysis| { | 73 | let res = do_work(&mut host, file_id, |analysis| { |
91 | analysis.completions(&options, file_position) | 74 | analysis.diagnostics(file_id, true).unwrap(); |
75 | analysis.highlight_as_html(file_id, false).unwrap() | ||
92 | }); | 76 | }); |
93 | if verbosity.is_verbose() { | 77 | if verbosity.is_verbose() { |
94 | println!("\n{:#?}", res); | 78 | println!("\n{}", res); |
95 | } | 79 | } |
96 | } else { | 80 | } |
97 | let res = | 81 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { |
98 | do_work(&mut host, file_id, |analysis| analysis.goto_definition(file_position)); | 82 | let is_completion = matches!(self.what, BenchWhat::Complete(..)); |
99 | if verbosity.is_verbose() { | 83 | |
100 | println!("\n{:#?}", res); | 84 | let offset = host |
85 | .analysis() | ||
86 | .file_line_index(file_id)? | ||
87 | .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); | ||
88 | let file_position = FilePosition { file_id, offset }; | ||
89 | |||
90 | if is_completion { | ||
91 | let options = CompletionConfig::default(); | ||
92 | let res = do_work(&mut host, file_id, |analysis| { | ||
93 | analysis.completions(&options, file_position) | ||
94 | }); | ||
95 | if verbosity.is_verbose() { | ||
96 | println!("\n{:#?}", res); | ||
97 | } | ||
98 | } else { | ||
99 | let res = do_work(&mut host, file_id, |analysis| { | ||
100 | analysis.goto_definition(file_position) | ||
101 | }); | ||
102 | if verbosity.is_verbose() { | ||
103 | println!("\n{:#?}", res); | ||
104 | } | ||
101 | } | 105 | } |
102 | } | 106 | } |
103 | } | 107 | } |
104 | } | ||
105 | 108 | ||
106 | if memory_usage { | 109 | if self.memory_usage { |
107 | print_memory_usage(host, vfs); | 110 | print_memory_usage(host, vfs); |
108 | } | 111 | } |
109 | 112 | ||
110 | Ok(()) | 113 | Ok(()) |
114 | } | ||
111 | } | 115 | } |
112 | 116 | ||
113 | fn do_work<F: Fn(&Analysis) -> T, T>(host: &mut AnalysisHost, file_id: FileId, work: F) -> T { | 117 | fn do_work<F: Fn(&Analysis) -> T, T>(host: &mut AnalysisHost, file_id: FileId, work: F) -> T { |
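
analysis_stats.rs gets the same treatment in the next file. For the benchmark command converted above, a caller sketch assuming the BenchCmd and BenchWhat definitions from this hunk; the "path:line:column" position syntax and the anyhow-compatible FromStr error type are assumptions, since neither is shown in the diff:

    use std::path::PathBuf;

    use anyhow::Result;
    use rust_analyzer::cli::{BenchCmd, BenchWhat, Verbosity};

    fn bench_completions(verbosity: Verbosity) -> Result<()> {
        let cmd = BenchCmd {
            path: PathBuf::from("."),
            // Assumed position syntax: file path, 1-based line, column.
            what: BenchWhat::Complete("crates/ra_hir/src/lib.rs:10:20".parse()?),
            memory_usage: false,
            load_output_dirs: true,
            with_proc_macro: true,
        };
        cmd.run(verbosity)
    }
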
diff --git a/crates/rust-analyzer/src/cli/analysis_stats.rs b/crates/rust-analyzer/src/cli/analysis_stats.rs
index a270eb481..721d41a58 100644
--- a/crates/rust-analyzer/src/cli/analysis_stats.rs
+++ b/crates/rust-analyzer/src/cli/analysis_stats.rs
@@ -2,7 +2,7 @@
2 | //! errors. | 2 | //! errors. |
3 | 3 | ||
4 | use std::{ | 4 | use std::{ |
5 | path::Path, | 5 | path::PathBuf, |
6 | time::{SystemTime, UNIX_EPOCH}, | 6 | time::{SystemTime, UNIX_EPOCH}, |
7 | }; | 7 | }; |
8 | 8 | ||
@@ -39,273 +39,280 @@ impl<DB: ParallelDatabase> Clone for Snap<salsa::Snapshot<DB>> {
39 | } | 39 | } |
40 | } | 40 | } |
41 | 41 | ||
42 | pub fn analysis_stats( | 42 | pub struct AnalysisStatsCmd { |
43 | verbosity: Verbosity, | 43 | pub randomize: bool, |
44 | memory_usage: bool, | 44 | pub parallel: bool, |
45 | path: &Path, | 45 | pub memory_usage: bool, |
46 | only: Option<&str>, | 46 | pub only: Option<String>, |
47 | with_deps: bool, | 47 | pub with_deps: bool, |
48 | randomize: bool, | 48 | pub path: PathBuf, |
49 | parallel: bool, | 49 | pub load_output_dirs: bool, |
50 | load_output_dirs: bool, | 50 | pub with_proc_macro: bool, |
51 | with_proc_macro: bool, | 51 | } |
52 | ) -> Result<()> { | ||
53 | let mut rng = { | ||
54 | let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; | ||
55 | Rand32::new(seed) | ||
56 | }; | ||
57 | 52 | ||
58 | let mut db_load_sw = StopWatch::start().memory(memory_usage); | 53 | impl AnalysisStatsCmd { |
59 | let (host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; | 54 | pub fn run(self, verbosity: Verbosity) -> Result<()> { |
60 | let db = host.raw_database(); | 55 | let mut rng = { |
61 | eprintln!("Database loaded {}", db_load_sw.elapsed()); | 56 | let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; |
57 | Rand32::new(seed) | ||
58 | }; | ||
62 | 59 | ||
63 | let mut analysis_sw = StopWatch::start().memory(memory_usage); | 60 | let mut db_load_sw = self.stop_watch(); |
64 | let mut num_crates = 0; | 61 | let (host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; |
65 | let mut visited_modules = FxHashSet::default(); | 62 | let db = host.raw_database(); |
66 | let mut visit_queue = Vec::new(); | 63 | eprintln!("Database loaded {}", db_load_sw.elapsed()); |
67 | 64 | ||
68 | let mut krates = Crate::all(db); | 65 | let mut analysis_sw = self.stop_watch(); |
69 | if randomize { | 66 | let mut num_crates = 0; |
70 | shuffle(&mut rng, &mut krates); | 67 | let mut visited_modules = FxHashSet::default(); |
71 | } | 68 | let mut visit_queue = Vec::new(); |
72 | for krate in krates { | ||
73 | let module = krate.root_module(db).expect("crate without root module"); | ||
74 | let file_id = module.definition_source(db).file_id; | ||
75 | let file_id = file_id.original_file(db); | ||
76 | let source_root = db.file_source_root(file_id); | ||
77 | let source_root = db.source_root(source_root); | ||
78 | if !source_root.is_library || with_deps { | ||
79 | num_crates += 1; | ||
80 | visit_queue.push(module); | ||
81 | } | ||
82 | } | ||
83 | 69 | ||
84 | if randomize { | 70 | let mut krates = Crate::all(db); |
85 | shuffle(&mut rng, &mut visit_queue); | 71 | if self.randomize { |
86 | } | 72 | shuffle(&mut rng, &mut krates); |
73 | } | ||
74 | for krate in krates { | ||
75 | let module = krate.root_module(db).expect("crate without root module"); | ||
76 | let file_id = module.definition_source(db).file_id; | ||
77 | let file_id = file_id.original_file(db); | ||
78 | let source_root = db.file_source_root(file_id); | ||
79 | let source_root = db.source_root(source_root); | ||
80 | if !source_root.is_library || self.with_deps { | ||
81 | num_crates += 1; | ||
82 | visit_queue.push(module); | ||
83 | } | ||
84 | } | ||
87 | 85 | ||
88 | eprintln!("Crates in this dir: {}", num_crates); | 86 | if self.randomize { |
89 | let mut num_decls = 0; | 87 | shuffle(&mut rng, &mut visit_queue); |
90 | let mut funcs = Vec::new(); | 88 | } |
91 | while let Some(module) = visit_queue.pop() { | ||
92 | if visited_modules.insert(module) { | ||
93 | visit_queue.extend(module.children(db)); | ||
94 | 89 | ||
95 | for decl in module.declarations(db) { | 90 | eprintln!("Crates in this dir: {}", num_crates); |
96 | num_decls += 1; | 91 | let mut num_decls = 0; |
97 | if let ModuleDef::Function(f) = decl { | 92 | let mut funcs = Vec::new(); |
98 | funcs.push(f); | 93 | while let Some(module) = visit_queue.pop() { |
99 | } | 94 | if visited_modules.insert(module) { |
100 | } | 95 | visit_queue.extend(module.children(db)); |
101 | 96 | ||
102 | for impl_def in module.impl_defs(db) { | 97 | for decl in module.declarations(db) { |
103 | for item in impl_def.items(db) { | ||
104 | num_decls += 1; | 98 | num_decls += 1; |
105 | if let AssocItem::Function(f) = item { | 99 | if let ModuleDef::Function(f) = decl { |
106 | funcs.push(f); | 100 | funcs.push(f); |
107 | } | 101 | } |
108 | } | 102 | } |
103 | |||
104 | for impl_def in module.impl_defs(db) { | ||
105 | for item in impl_def.items(db) { | ||
106 | num_decls += 1; | ||
107 | if let AssocItem::Function(f) = item { | ||
108 | funcs.push(f); | ||
109 | } | ||
110 | } | ||
111 | } | ||
109 | } | 112 | } |
110 | } | 113 | } |
111 | } | 114 | eprintln!("Total modules found: {}", visited_modules.len()); |
112 | eprintln!("Total modules found: {}", visited_modules.len()); | 115 | eprintln!("Total declarations: {}", num_decls); |
113 | eprintln!("Total declarations: {}", num_decls); | 116 | eprintln!("Total functions: {}", funcs.len()); |
114 | eprintln!("Total functions: {}", funcs.len()); | 117 | eprintln!("Item Collection: {}", analysis_sw.elapsed()); |
115 | eprintln!("Item Collection: {}", analysis_sw.elapsed()); | ||
116 | |||
117 | if randomize { | ||
118 | shuffle(&mut rng, &mut funcs); | ||
119 | } | ||
120 | 118 | ||
121 | let mut bar = match verbosity { | 119 | if self.randomize { |
122 | Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), | 120 | shuffle(&mut rng, &mut funcs); |
123 | _ if parallel => ProgressReport::hidden(), | 121 | } |
124 | _ => ProgressReport::new(funcs.len() as u64), | ||
125 | }; | ||
126 | 122 | ||
127 | if parallel { | 123 | let mut bar = match verbosity { |
128 | let mut inference_sw = StopWatch::start().memory(memory_usage); | 124 | Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), |
129 | let snap = Snap(db.snapshot()); | 125 | _ if self.parallel => ProgressReport::hidden(), |
130 | funcs | 126 | _ => ProgressReport::new(funcs.len() as u64), |
131 | .par_iter() | 127 | }; |
132 | .map_with(snap, |snap, &f| { | ||
133 | let f_id = FunctionId::from(f); | ||
134 | snap.0.body(f_id.into()); | ||
135 | snap.0.infer(f_id.into()); | ||
136 | }) | ||
137 | .count(); | ||
138 | eprintln!("Parallel Inference: {}", inference_sw.elapsed()); | ||
139 | } | ||
140 | 128 | ||
141 | let mut inference_sw = StopWatch::start().memory(memory_usage); | 129 | if self.parallel { |
142 | bar.tick(); | 130 | let mut inference_sw = self.stop_watch(); |
143 | let mut num_exprs = 0; | 131 | let snap = Snap(db.snapshot()); |
144 | let mut num_exprs_unknown = 0; | 132 | funcs |
145 | let mut num_exprs_partially_unknown = 0; | 133 | .par_iter() |
146 | let mut num_type_mismatches = 0; | 134 | .map_with(snap, |snap, &f| { |
147 | for f in funcs { | 135 | let f_id = FunctionId::from(f); |
148 | let name = f.name(db); | 136 | snap.0.body(f_id.into()); |
149 | let full_name = f | 137 | snap.0.infer(f_id.into()); |
150 | .module(db) | 138 | }) |
151 | .path_to_root(db) | 139 | .count(); |
152 | .into_iter() | 140 | eprintln!("Parallel Inference: {}", inference_sw.elapsed()); |
153 | .rev() | ||
154 | .filter_map(|it| it.name(db)) | ||
155 | .chain(Some(f.name(db))) | ||
156 | .join("::"); | ||
157 | if let Some(only_name) = only { | ||
158 | if name.to_string() != only_name && full_name != only_name { | ||
159 | continue; | ||
160 | } | ||
161 | } | ||
162 | let mut msg = format!("processing: {}", full_name); | ||
163 | if verbosity.is_verbose() { | ||
164 | let src = f.source(db); | ||
165 | let original_file = src.file_id.original_file(db); | ||
166 | let path = vfs.file_path(original_file); | ||
167 | let syntax_range = src.value.syntax().text_range(); | ||
168 | format_to!(msg, " ({} {:?})", path, syntax_range); | ||
169 | } | 141 | } |
170 | if verbosity.is_spammy() { | 142 | |
171 | bar.println(msg.to_string()); | 143 | let mut inference_sw = self.stop_watch(); |
172 | } | 144 | bar.tick(); |
173 | bar.set_message(&msg); | 145 | let mut num_exprs = 0; |
174 | let f_id = FunctionId::from(f); | 146 | let mut num_exprs_unknown = 0; |
175 | let body = db.body(f_id.into()); | 147 | let mut num_exprs_partially_unknown = 0; |
176 | let inference_result = db.infer(f_id.into()); | 148 | let mut num_type_mismatches = 0; |
177 | let (previous_exprs, previous_unknown, previous_partially_unknown) = | 149 | for f in funcs { |
178 | (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); | 150 | let name = f.name(db); |
179 | for (expr_id, _) in body.exprs.iter() { | 151 | let full_name = f |
180 | let ty = &inference_result[expr_id]; | 152 | .module(db) |
181 | num_exprs += 1; | 153 | .path_to_root(db) |
182 | if let Ty::Unknown = ty { | 154 | .into_iter() |
183 | num_exprs_unknown += 1; | 155 | .rev() |
184 | } else { | 156 | .filter_map(|it| it.name(db)) |
185 | let mut is_partially_unknown = false; | 157 | .chain(Some(f.name(db))) |
186 | ty.walk(&mut |ty| { | 158 | .join("::"); |
187 | if let Ty::Unknown = ty { | 159 | if let Some(only_name) = self.only.as_deref() { |
188 | is_partially_unknown = true; | 160 | if name.to_string() != only_name && full_name != only_name { |
189 | } | 161 | continue; |
190 | }); | ||
191 | if is_partially_unknown { | ||
192 | num_exprs_partially_unknown += 1; | ||
193 | } | 162 | } |
194 | } | 163 | } |
195 | if only.is_some() && verbosity.is_spammy() { | 164 | let mut msg = format!("processing: {}", full_name); |
196 | // in super-verbose mode for just one function, we print every single expression | 165 | if verbosity.is_verbose() { |
197 | let (_, sm) = db.body_with_source_map(f_id.into()); | 166 | let src = f.source(db); |
198 | let src = sm.expr_syntax(expr_id); | 167 | let original_file = src.file_id.original_file(db); |
199 | if let Ok(src) = src { | 168 | let path = vfs.file_path(original_file); |
200 | let node = { | 169 | let syntax_range = src.value.syntax().text_range(); |
201 | let root = db.parse_or_expand(src.file_id).unwrap(); | 170 | format_to!(msg, " ({} {:?})", path, syntax_range); |
202 | src.value.to_node(&root) | 171 | } |
203 | }; | 172 | if verbosity.is_spammy() { |
204 | let original_file = src.file_id.original_file(db); | 173 | bar.println(msg.to_string()); |
205 | let line_index = host.analysis().file_line_index(original_file).unwrap(); | 174 | } |
206 | let text_range = node.syntax().text_range(); | 175 | bar.set_message(&msg); |
207 | let (start, end) = ( | 176 | let f_id = FunctionId::from(f); |
208 | line_index.line_col(text_range.start()), | 177 | let body = db.body(f_id.into()); |
209 | line_index.line_col(text_range.end()), | 178 | let inference_result = db.infer(f_id.into()); |
210 | ); | 179 | let (previous_exprs, previous_unknown, previous_partially_unknown) = |
211 | bar.println(format!( | 180 | (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); |
212 | "{}:{}-{}:{}: {}", | 181 | for (expr_id, _) in body.exprs.iter() { |
213 | start.line + 1, | 182 | let ty = &inference_result[expr_id]; |
214 | start.col_utf16, | 183 | num_exprs += 1; |
215 | end.line + 1, | 184 | if let Ty::Unknown = ty { |
216 | end.col_utf16, | 185 | num_exprs_unknown += 1; |
217 | ty.display(db) | ||
218 | )); | ||
219 | } else { | 186 | } else { |
220 | bar.println(format!("unknown location: {}", ty.display(db))); | 187 | let mut is_partially_unknown = false; |
188 | ty.walk(&mut |ty| { | ||
189 | if let Ty::Unknown = ty { | ||
190 | is_partially_unknown = true; | ||
191 | } | ||
192 | }); | ||
193 | if is_partially_unknown { | ||
194 | num_exprs_partially_unknown += 1; | ||
195 | } | ||
221 | } | 196 | } |
222 | } | 197 | if self.only.is_some() && verbosity.is_spammy() { |
223 | if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { | 198 | // in super-verbose mode for just one function, we print every single expression |
224 | num_type_mismatches += 1; | ||
225 | if verbosity.is_verbose() { | ||
226 | let (_, sm) = db.body_with_source_map(f_id.into()); | 199 | let (_, sm) = db.body_with_source_map(f_id.into()); |
227 | let src = sm.expr_syntax(expr_id); | 200 | let src = sm.expr_syntax(expr_id); |
228 | if let Ok(src) = src { | 201 | if let Ok(src) = src { |
229 | // FIXME: it might be nice to have a function (on Analysis?) that goes from Source<T> -> (LineCol, LineCol) directly | 202 | let node = { |
230 | // But also, we should just turn the type mismatches into diagnostics and provide these | 203 | let root = db.parse_or_expand(src.file_id).unwrap(); |
231 | let root = db.parse_or_expand(src.file_id).unwrap(); | 204 | src.value.to_node(&root) |
232 | let node = src.map(|e| e.to_node(&root).syntax().clone()); | 205 | }; |
233 | let original_range = original_range(db, node.as_ref()); | 206 | let original_file = src.file_id.original_file(db); |
234 | let path = vfs.file_path(original_range.file_id); | 207 | let line_index = host.analysis().file_line_index(original_file).unwrap(); |
235 | let line_index = | 208 | let text_range = node.syntax().text_range(); |
236 | host.analysis().file_line_index(original_range.file_id).unwrap(); | ||
237 | let text_range = original_range.range; | ||
238 | let (start, end) = ( | 209 | let (start, end) = ( |
239 | line_index.line_col(text_range.start()), | 210 | line_index.line_col(text_range.start()), |
240 | line_index.line_col(text_range.end()), | 211 | line_index.line_col(text_range.end()), |
241 | ); | 212 | ); |
242 | bar.println(format!( | 213 | bar.println(format!( |
243 | "{} {}:{}-{}:{}: Expected {}, got {}", | 214 | "{}:{}-{}:{}: {}", |
244 | path, | ||
245 | start.line + 1, | 215 | start.line + 1, |
246 | start.col_utf16, | 216 | start.col_utf16, |
247 | end.line + 1, | 217 | end.line + 1, |
248 | end.col_utf16, | 218 | end.col_utf16, |
249 | mismatch.expected.display(db), | 219 | ty.display(db) |
250 | mismatch.actual.display(db) | ||
251 | )); | 220 | )); |
252 | } else { | 221 | } else { |
253 | bar.println(format!( | 222 | bar.println(format!("unknown location: {}", ty.display(db))); |
254 | "{}: Expected {}, got {}", | 223 | } |
255 | name, | 224 | } |
256 | mismatch.expected.display(db), | 225 | if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { |
257 | mismatch.actual.display(db) | 226 | num_type_mismatches += 1; |
258 | )); | 227 | if verbosity.is_verbose() { |
228 | let (_, sm) = db.body_with_source_map(f_id.into()); | ||
229 | let src = sm.expr_syntax(expr_id); | ||
230 | if let Ok(src) = src { | ||
231 | // FIXME: it might be nice to have a function (on Analysis?) that goes from Source<T> -> (LineCol, LineCol) directly | ||
232 | // But also, we should just turn the type mismatches into diagnostics and provide these | ||
233 | let root = db.parse_or_expand(src.file_id).unwrap(); | ||
234 | let node = src.map(|e| e.to_node(&root).syntax().clone()); | ||
235 | let original_range = original_range(db, node.as_ref()); | ||
236 | let path = vfs.file_path(original_range.file_id); | ||
237 | let line_index = | ||
238 | host.analysis().file_line_index(original_range.file_id).unwrap(); | ||
239 | let text_range = original_range.range; | ||
240 | let (start, end) = ( | ||
241 | line_index.line_col(text_range.start()), | ||
242 | line_index.line_col(text_range.end()), | ||
243 | ); | ||
244 | bar.println(format!( | ||
245 | "{} {}:{}-{}:{}: Expected {}, got {}", | ||
246 | path, | ||
247 | start.line + 1, | ||
248 | start.col_utf16, | ||
249 | end.line + 1, | ||
250 | end.col_utf16, | ||
251 | mismatch.expected.display(db), | ||
252 | mismatch.actual.display(db) | ||
253 | )); | ||
254 | } else { | ||
255 | bar.println(format!( | ||
256 | "{}: Expected {}, got {}", | ||
257 | name, | ||
258 | mismatch.expected.display(db), | ||
259 | mismatch.actual.display(db) | ||
260 | )); | ||
261 | } | ||
259 | } | 262 | } |
260 | } | 263 | } |
261 | } | 264 | } |
265 | if verbosity.is_spammy() { | ||
266 | bar.println(format!( | ||
267 | "In {}: {} exprs, {} unknown, {} partial", | ||
268 | full_name, | ||
269 | num_exprs - previous_exprs, | ||
270 | num_exprs_unknown - previous_unknown, | ||
271 | num_exprs_partially_unknown - previous_partially_unknown | ||
272 | )); | ||
273 | } | ||
274 | bar.inc(1); | ||
262 | } | 275 | } |
263 | if verbosity.is_spammy() { | 276 | bar.finish_and_clear(); |
264 | bar.println(format!( | 277 | eprintln!("Total expressions: {}", num_exprs); |
265 | "In {}: {} exprs, {} unknown, {} partial", | 278 | eprintln!( |
266 | full_name, | 279 | "Expressions of unknown type: {} ({}%)", |
267 | num_exprs - previous_exprs, | 280 | num_exprs_unknown, |
268 | num_exprs_unknown - previous_unknown, | 281 | if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } |
269 | num_exprs_partially_unknown - previous_partially_unknown | 282 | ); |
270 | )); | 283 | report_metric("unknown type", num_exprs_unknown, "#"); |
271 | } | ||
272 | bar.inc(1); | ||
273 | } | ||
274 | bar.finish_and_clear(); | ||
275 | eprintln!("Total expressions: {}", num_exprs); | ||
276 | eprintln!( | ||
277 | "Expressions of unknown type: {} ({}%)", | ||
278 | num_exprs_unknown, | ||
279 | if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } | ||
280 | ); | ||
281 | report_metric("unknown type", num_exprs_unknown, "#"); | ||
282 | 284 | ||
283 | eprintln!( | 285 | eprintln!( |
284 | "Expressions of partially unknown type: {} ({}%)", | 286 | "Expressions of partially unknown type: {} ({}%)", |
285 | num_exprs_partially_unknown, | 287 | num_exprs_partially_unknown, |
286 | if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } | 288 | if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } |
287 | ); | 289 | ); |
288 | 290 | ||
289 | eprintln!("Type mismatches: {}", num_type_mismatches); | 291 | eprintln!("Type mismatches: {}", num_type_mismatches); |
290 | report_metric("type mismatches", num_type_mismatches, "#"); | 292 | report_metric("type mismatches", num_type_mismatches, "#"); |
291 | 293 | ||
292 | eprintln!("Inference: {}", inference_sw.elapsed()); | 294 | eprintln!("Inference: {}", inference_sw.elapsed()); |
293 | 295 | ||
294 | let total_span = analysis_sw.elapsed(); | 296 | let total_span = analysis_sw.elapsed(); |
295 | eprintln!("Total: {}", total_span); | 297 | eprintln!("Total: {}", total_span); |
296 | report_metric("total time", total_span.time.as_millis() as u64, "ms"); | 298 | report_metric("total time", total_span.time.as_millis() as u64, "ms"); |
297 | if let Some(instructions) = total_span.instructions { | 299 | if let Some(instructions) = total_span.instructions { |
298 | report_metric("total instructions", instructions, "#instr"); | 300 | report_metric("total instructions", instructions, "#instr"); |
299 | } | 301 | } |
300 | if let Some(memory) = total_span.memory { | 302 | if let Some(memory) = total_span.memory { |
301 | report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); | 303 | report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); |
302 | } | 304 | } |
303 | 305 | ||
304 | if memory_usage { | 306 | if self.memory_usage { |
305 | print_memory_usage(host, vfs); | 307 | print_memory_usage(host, vfs); |
308 | } | ||
309 | |||
310 | Ok(()) | ||
306 | } | 311 | } |
307 | 312 | ||
308 | Ok(()) | 313 | fn stop_watch(&self) -> StopWatch { |
314 | StopWatch::start().memory(self.memory_usage) | ||
315 | } | ||
309 | } | 316 | } |
310 | 317 | ||
311 | fn shuffle<T>(rng: &mut Rand32, slice: &mut [T]) { | 318 | fn shuffle<T>(rng: &mut Rand32, slice: &mut [T]) { |