-rw-r--r--  Cargo.lock                                     |  28
-rw-r--r--  crates/ra_hir_ty/Cargo.toml                    |   6
-rw-r--r--  crates/ra_hir_ty/src/traits/chalk.rs           |  19
-rw-r--r--  crates/ra_hir_ty/src/traits/chalk/mapping.rs   |  22
-rw-r--r--  crates/ra_project_model/src/cfg_flag.rs        |   4
-rw-r--r--  crates/ra_syntax/Cargo.toml                    |   2
-rw-r--r--  crates/ra_syntax/src/ast/expr_ext.rs           |   3
-rw-r--r--  crates/rust-analyzer/Cargo.toml                |   4
-rw-r--r--  crates/rust-analyzer/src/bin/args.rs           |  33
-rw-r--r--  crates/rust-analyzer/src/bin/main.rs           |  32
-rw-r--r--  crates/rust-analyzer/src/cli.rs                |   4
-rw-r--r--  crates/rust-analyzer/src/cli/analysis_bench.rs | 135
-rw-r--r--  crates/rust-analyzer/src/cli/analysis_stats.rs | 451
-rw-r--r--  crates/rust-analyzer/src/handlers.rs           |   4
-rw-r--r--  crates/stdx/src/lib.rs                         |  15
-rw-r--r--  crates/test_utils/src/fixture.rs               |   8
16 files changed, 379 insertions(+), 391 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index e63dcc530..234c31406 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -125,9 +125,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" | |||
125 | 125 | ||
126 | [[package]] | 126 | [[package]] |
127 | name = "chalk-derive" | 127 | name = "chalk-derive" |
128 | version = "0.18.0" | 128 | version = "0.19.0" |
129 | source = "registry+https://github.com/rust-lang/crates.io-index" | 129 | source = "registry+https://github.com/rust-lang/crates.io-index" |
130 | checksum = "eea3a22f0c30b2504ac4ab58934dac0d00b92a4d7788df32795cabca24c3f929" | 130 | checksum = "654c611946ba2629c5028cb7708687af975faf2c29d731824cb294c873df4697" |
131 | dependencies = [ | 131 | dependencies = [ |
132 | "proc-macro2", | 132 | "proc-macro2", |
133 | "quote", | 133 | "quote", |
@@ -137,9 +137,9 @@ dependencies = [ | |||
137 | 137 | ||
138 | [[package]] | 138 | [[package]] |
139 | name = "chalk-ir" | 139 | name = "chalk-ir" |
140 | version = "0.18.0" | 140 | version = "0.19.0" |
141 | source = "registry+https://github.com/rust-lang/crates.io-index" | 141 | source = "registry+https://github.com/rust-lang/crates.io-index" |
142 | checksum = "fb617b643e145e3b151502799e91a9625dd5daf1cf05dc2cb821bc75ae0c9cbd" | 142 | checksum = "0a5341fbc654ca886b73b804a36aebf0e621057ccc1a68e9815b5b39b3ac9ae8" |
143 | dependencies = [ | 143 | dependencies = [ |
144 | "chalk-derive", | 144 | "chalk-derive", |
145 | "lazy_static", | 145 | "lazy_static", |
@@ -147,9 +147,9 @@ dependencies = [ | |||
147 | 147 | ||
148 | [[package]] | 148 | [[package]] |
149 | name = "chalk-recursive" | 149 | name = "chalk-recursive" |
150 | version = "0.18.0" | 150 | version = "0.19.0" |
151 | source = "registry+https://github.com/rust-lang/crates.io-index" | 151 | source = "registry+https://github.com/rust-lang/crates.io-index" |
152 | checksum = "d280565c8eefbf9b2bc615df49c7dfd971faad37774bf65734e626fd23864bd6" | 152 | checksum = "4484807b155b5a411e6135d330295f9ba5042e2920b8712c6574ca6ea91e9ee5" |
153 | dependencies = [ | 153 | dependencies = [ |
154 | "chalk-derive", | 154 | "chalk-derive", |
155 | "chalk-ir", | 155 | "chalk-ir", |
@@ -160,9 +160,9 @@ dependencies = [ | |||
160 | 160 | ||
161 | [[package]] | 161 | [[package]] |
162 | name = "chalk-solve" | 162 | name = "chalk-solve" |
163 | version = "0.18.0" | 163 | version = "0.19.0" |
164 | source = "registry+https://github.com/rust-lang/crates.io-index" | 164 | source = "registry+https://github.com/rust-lang/crates.io-index" |
165 | checksum = "be906fbca3f3077dce0e76d9864771d0f450c946af0d86b569fb9504148a065a" | 165 | checksum = "281f82facd2538997fbe52132b1941ed213d266748215c31d15f62a8664429ad" |
166 | dependencies = [ | 166 | dependencies = [ |
167 | "chalk-derive", | 167 | "chalk-derive", |
168 | "chalk-ir", | 168 | "chalk-ir", |
@@ -607,9 +607,9 @@ dependencies = [ | |||
607 | 607 | ||
608 | [[package]] | 608 | [[package]] |
609 | name = "lsp-types" | 609 | name = "lsp-types" |
610 | version = "0.78.0" | 610 | version = "0.79.0" |
611 | source = "registry+https://github.com/rust-lang/crates.io-index" | 611 | source = "registry+https://github.com/rust-lang/crates.io-index" |
612 | checksum = "d2e6cf68e3492cfa2035f0382c1da1b6ab045db0320feca505b86b4f13d66c27" | 612 | checksum = "7f1f86677fdbe8df5f88b99131b1424e50aad27bbe3e5900d221bc414bd72e9b" |
613 | dependencies = [ | 613 | dependencies = [ |
614 | "base64", | 614 | "base64", |
615 | "bitflags", | 615 | "bitflags", |
@@ -842,9 +842,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" | |||
842 | 842 | ||
843 | [[package]] | 843 | [[package]] |
844 | name = "perf-event" | 844 | name = "perf-event" |
845 | version = "0.4.3" | 845 | version = "0.4.4" |
846 | source = "registry+https://github.com/rust-lang/crates.io-index" | 846 | source = "registry+https://github.com/rust-lang/crates.io-index" |
847 | checksum = "0cb38a2f363560fb3cfcb47f28848b245a41c7e0d63e0b190918b712b6bf6803" | 847 | checksum = "76c42ba5d85a2f4472b99f475fb60cf336d9b4c85b1ea8bb300fef2e3c7c8f89" |
848 | dependencies = [ | 848 | dependencies = [ |
849 | "libc", | 849 | "libc", |
850 | "perf-event-open-sys", | 850 | "perf-event-open-sys", |
@@ -1344,9 +1344,9 @@ dependencies = [ | |||
1344 | 1344 | ||
1345 | [[package]] | 1345 | [[package]] |
1346 | name = "rustc-ap-rustc_lexer" | 1346 | name = "rustc-ap-rustc_lexer" |
1347 | version = "669.0.0" | 1347 | version = "671.0.0" |
1348 | source = "registry+https://github.com/rust-lang/crates.io-index" | 1348 | source = "registry+https://github.com/rust-lang/crates.io-index" |
1349 | checksum = "456af5f09c006cf6c22c1a433ee0232c4bb74bdc6c647a010166a47c94ed2a63" | 1349 | checksum = "22e1221f3bfa2943c942cf8da319ab2346887f8757778c29c7f1822cd27b521f" |
1350 | dependencies = [ | 1350 | dependencies = [ |
1351 | "unicode-xid", | 1351 | "unicode-xid", |
1352 | ] | 1352 | ] |
diff --git a/crates/ra_hir_ty/Cargo.toml b/crates/ra_hir_ty/Cargo.toml
index 7242e2cb6..623ce261a 100644
--- a/crates/ra_hir_ty/Cargo.toml
+++ b/crates/ra_hir_ty/Cargo.toml
@@ -28,9 +28,9 @@ test_utils = { path = "../test_utils" } | |||
28 | 28 | ||
29 | scoped-tls = "1" | 29 | scoped-tls = "1" |
30 | 30 | ||
31 | chalk-solve = { version = "0.18.0" } | 31 | chalk-solve = { version = "0.19.0" } |
32 | chalk-ir = { version = "0.18.0" } | 32 | chalk-ir = { version = "0.19.0" } |
33 | chalk-recursive = { version = "0.18.0" } | 33 | chalk-recursive = { version = "0.19.0" } |
34 | 34 | ||
35 | [dev-dependencies] | 35 | [dev-dependencies] |
36 | expect = { path = "../expect" } | 36 | expect = { path = "../expect" } |
diff --git a/crates/ra_hir_ty/src/traits/chalk.rs b/crates/ra_hir_ty/src/traits/chalk.rs
index 5298dbecf..1c7065364 100644
--- a/crates/ra_hir_ty/src/traits/chalk.rs
+++ b/crates/ra_hir_ty/src/traits/chalk.rs
@@ -183,6 +183,7 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> { | |||
183 | .collect(), | 183 | .collect(), |
184 | 1, | 184 | 1, |
185 | ), | 185 | ), |
186 | where_clauses: make_binders(vec![], 0), | ||
186 | }; | 187 | }; |
187 | let num_vars = datas.num_binders; | 188 | let num_vars = datas.num_binders; |
188 | Arc::new(OpaqueTyDatum { opaque_ty_id: id, bound: make_binders(bound, num_vars) }) | 189 | Arc::new(OpaqueTyDatum { opaque_ty_id: id, bound: make_binders(bound, num_vars) }) |
@@ -193,15 +194,6 @@ impl<'a> chalk_solve::RustIrDatabase<Interner> for ChalkContext<'a> { | |||
193 | Ty::Unknown.to_chalk(self.db) | 194 | Ty::Unknown.to_chalk(self.db) |
194 | } | 195 | } |
195 | 196 | ||
196 | fn force_impl_for( | ||
197 | &self, | ||
198 | _well_known: rust_ir::WellKnownTrait, | ||
199 | _ty: &chalk_ir::TyData<Interner>, | ||
200 | ) -> Option<bool> { | ||
201 | // this method is mostly for rustc | ||
202 | None | ||
203 | } | ||
204 | |||
205 | fn is_object_safe(&self, _trait_id: chalk_ir::TraitId<Interner>) -> bool { | 197 | fn is_object_safe(&self, _trait_id: chalk_ir::TraitId<Interner>) -> bool { |
206 | // FIXME: implement actual object safety | 198 | // FIXME: implement actual object safety |
207 | true | 199 | true |
@@ -547,8 +539,13 @@ pub(crate) fn fn_def_datum_query( | |||
547 | ), | 539 | ), |
548 | where_clauses, | 540 | where_clauses, |
549 | }; | 541 | }; |
550 | let datum = | 542 | let datum = FnDefDatum { |
551 | FnDefDatum { id: fn_def_id, binders: make_binders(bound, sig.num_binders), abi: () }; | 543 | id: fn_def_id, |
544 | abi: (), | ||
545 | safety: chalk_ir::Safety::Safe, | ||
546 | variadic: sig.value.is_varargs, | ||
547 | binders: make_binders(bound, sig.num_binders), | ||
548 | }; | ||
552 | Arc::new(datum) | 549 | Arc::new(datum) |
553 | } | 550 | } |
554 | 551 | ||
diff --git a/crates/ra_hir_ty/src/traits/chalk/mapping.rs b/crates/ra_hir_ty/src/traits/chalk/mapping.rs
index 09d8347ca..b3e92993d 100644
--- a/crates/ra_hir_ty/src/traits/chalk/mapping.rs
+++ b/crates/ra_hir_ty/src/traits/chalk/mapping.rs
@@ -30,11 +30,16 @@ impl ToChalk for Ty { | |||
30 | Ty::Apply(apply_ty) => match apply_ty.ctor { | 30 | Ty::Apply(apply_ty) => match apply_ty.ctor { |
31 | TypeCtor::Ref(m) => ref_to_chalk(db, m, apply_ty.parameters), | 31 | TypeCtor::Ref(m) => ref_to_chalk(db, m, apply_ty.parameters), |
32 | TypeCtor::Array => array_to_chalk(db, apply_ty.parameters), | 32 | TypeCtor::Array => array_to_chalk(db, apply_ty.parameters), |
33 | TypeCtor::FnPtr { num_args: _, is_varargs: _ } => { | 33 | TypeCtor::FnPtr { num_args: _, is_varargs } => { |
34 | // FIXME: handle is_varargs | ||
35 | let substitution = apply_ty.parameters.to_chalk(db).shifted_in(&Interner); | 34 | let substitution = apply_ty.parameters.to_chalk(db).shifted_in(&Interner); |
36 | chalk_ir::TyData::Function(chalk_ir::Fn { num_binders: 0, substitution }) | 35 | chalk_ir::TyData::Function(chalk_ir::FnPointer { |
37 | .intern(&Interner) | 36 | num_binders: 0, |
37 | abi: (), | ||
38 | safety: chalk_ir::Safety::Safe, | ||
39 | variadic: is_varargs, | ||
40 | substitution, | ||
41 | }) | ||
42 | .intern(&Interner) | ||
38 | } | 43 | } |
39 | _ => { | 44 | _ => { |
40 | let name = apply_ty.ctor.to_chalk(db); | 45 | let name = apply_ty.ctor.to_chalk(db); |
@@ -118,7 +123,12 @@ impl ToChalk for Ty { | |||
118 | let parameters = from_chalk(db, opaque_ty.substitution); | 123 | let parameters = from_chalk(db, opaque_ty.substitution); |
119 | Ty::Opaque(OpaqueTy { opaque_ty_id: impl_trait_id, parameters }) | 124 | Ty::Opaque(OpaqueTy { opaque_ty_id: impl_trait_id, parameters }) |
120 | } | 125 | } |
121 | chalk_ir::TyData::Function(chalk_ir::Fn { num_binders, substitution }) => { | 126 | chalk_ir::TyData::Function(chalk_ir::FnPointer { |
127 | num_binders, | ||
128 | variadic, | ||
129 | substitution, | ||
130 | .. | ||
131 | }) => { | ||
122 | assert_eq!(num_binders, 0); | 132 | assert_eq!(num_binders, 0); |
123 | let parameters: Substs = from_chalk( | 133 | let parameters: Substs = from_chalk( |
124 | db, | 134 | db, |
@@ -127,7 +137,7 @@ impl ToChalk for Ty { | |||
127 | Ty::Apply(ApplicationTy { | 137 | Ty::Apply(ApplicationTy { |
128 | ctor: TypeCtor::FnPtr { | 138 | ctor: TypeCtor::FnPtr { |
129 | num_args: (parameters.len() - 1) as u16, | 139 | num_args: (parameters.len() - 1) as u16, |
130 | is_varargs: false, | 140 | is_varargs: variadic, |
131 | }, | 141 | }, |
132 | parameters, | 142 | parameters, |
133 | }) | 143 | }) |
diff --git a/crates/ra_project_model/src/cfg_flag.rs b/crates/ra_project_model/src/cfg_flag.rs
index 1bc5d4832..bd50056c6 100644
--- a/crates/ra_project_model/src/cfg_flag.rs
+++ b/crates/ra_project_model/src/cfg_flag.rs
@@ -4,7 +4,7 @@ | |||
4 | use std::str::FromStr; | 4 | use std::str::FromStr; |
5 | 5 | ||
6 | use ra_cfg::CfgOptions; | 6 | use ra_cfg::CfgOptions; |
7 | use stdx::split_delim; | 7 | use stdx::split_once; |
8 | 8 | ||
9 | #[derive(Clone, Eq, PartialEq, Debug)] | 9 | #[derive(Clone, Eq, PartialEq, Debug)] |
10 | pub enum CfgFlag { | 10 | pub enum CfgFlag { |
@@ -15,7 +15,7 @@ pub enum CfgFlag { | |||
15 | impl FromStr for CfgFlag { | 15 | impl FromStr for CfgFlag { |
16 | type Err = String; | 16 | type Err = String; |
17 | fn from_str(s: &str) -> Result<Self, Self::Err> { | 17 | fn from_str(s: &str) -> Result<Self, Self::Err> { |
18 | let res = match split_delim(s, '=') { | 18 | let res = match split_once(s, '=') { |
19 | Some((key, value)) => { | 19 | Some((key, value)) => { |
20 | if !(value.starts_with('"') && value.ends_with('"')) { | 20 | if !(value.starts_with('"') && value.ends_with('"')) { |
21 | return Err(format!("Invalid cfg ({:?}), value should be in quotes", s)); | 21 | return Err(format!("Invalid cfg ({:?}), value should be in quotes", s)); |
diff --git a/crates/ra_syntax/Cargo.toml b/crates/ra_syntax/Cargo.toml
index 670f04578..fc4d7aa04 100644
--- a/crates/ra_syntax/Cargo.toml
+++ b/crates/ra_syntax/Cargo.toml
@@ -13,7 +13,7 @@ doctest = false | |||
13 | [dependencies] | 13 | [dependencies] |
14 | itertools = "0.9.0" | 14 | itertools = "0.9.0" |
15 | rowan = "0.10.0" | 15 | rowan = "0.10.0" |
16 | rustc_lexer = { version = "669.0.0", package = "rustc-ap-rustc_lexer" } | 16 | rustc_lexer = { version = "671.0.0", package = "rustc-ap-rustc_lexer" } |
17 | rustc-hash = "1.1.0" | 17 | rustc-hash = "1.1.0" |
18 | arrayvec = "0.5.1" | 18 | arrayvec = "0.5.1" |
19 | once_cell = "1.3.1" | 19 | once_cell = "1.3.1" |
diff --git a/crates/ra_syntax/src/ast/expr_ext.rs b/crates/ra_syntax/src/ast/expr_ext.rs
index 365f94287..f5ba87223 100644
--- a/crates/ra_syntax/src/ast/expr_ext.rs
+++ b/crates/ra_syntax/src/ast/expr_ext.rs
@@ -333,13 +333,12 @@ impl ast::Literal { | |||
333 | 333 | ||
334 | match token.kind() { | 334 | match token.kind() { |
335 | INT_NUMBER => { | 335 | INT_NUMBER => { |
336 | // FYI: there was a bug here previously, thus an if statement bellow is necessary. | 336 | // FYI: there was a bug here previously, thus the if statement below is necessary. |
337 | // The lexer treats e.g. `1f64` as an integer literal. See | 337 | // The lexer treats e.g. `1f64` as an integer literal. See |
338 | // https://github.com/rust-analyzer/rust-analyzer/issues/1592 | 338 | // https://github.com/rust-analyzer/rust-analyzer/issues/1592 |
339 | // and the comments on the linked PR. | 339 | // and the comments on the linked PR. |
340 | 340 | ||
341 | let text = token.text(); | 341 | let text = token.text(); |
342 | |||
343 | if let suffix @ Some(_) = Self::find_suffix(&text, &FLOAT_SUFFIXES) { | 342 | if let suffix @ Some(_) = Self::find_suffix(&text, &FLOAT_SUFFIXES) { |
344 | LiteralKind::FloatNumber { suffix } | 343 | LiteralKind::FloatNumber { suffix } |
345 | } else { | 344 | } else { |
diff --git a/crates/rust-analyzer/Cargo.toml b/crates/rust-analyzer/Cargo.toml
index 931fc61ed..02c1371ac 100644
--- a/crates/rust-analyzer/Cargo.toml
+++ b/crates/rust-analyzer/Cargo.toml
@@ -20,7 +20,7 @@ env_logger = { version = "0.7.1", default-features = false } | |||
20 | itertools = "0.9.0" | 20 | itertools = "0.9.0" |
21 | jod-thread = "0.1.0" | 21 | jod-thread = "0.1.0" |
22 | log = "0.4.8" | 22 | log = "0.4.8" |
23 | lsp-types = { version = "0.78.0", features = ["proposed"] } | 23 | lsp-types = { version = "0.79.0", features = ["proposed"] } |
24 | parking_lot = "0.11.0" | 24 | parking_lot = "0.11.0" |
25 | pico-args = "0.3.1" | 25 | pico-args = "0.3.1" |
26 | oorandom = "11.1.2" | 26 | oorandom = "11.1.2" |
@@ -42,7 +42,7 @@ ra_syntax = { path = "../ra_syntax" } | |||
42 | ra_text_edit = { path = "../ra_text_edit" } | 42 | ra_text_edit = { path = "../ra_text_edit" } |
43 | vfs = { path = "../vfs" } | 43 | vfs = { path = "../vfs" } |
44 | vfs-notify = { path = "../vfs-notify" } | 44 | vfs-notify = { path = "../vfs-notify" } |
45 | ra_cfg = { path = "../ra_cfg"} | 45 | ra_cfg = { path = "../ra_cfg" } |
46 | ra_toolchain = { path = "../ra_toolchain" } | 46 | ra_toolchain = { path = "../ra_toolchain" } |
47 | 47 | ||
48 | # This should only be used in CLI | 48 | # This should only be used in CLI |
diff --git a/crates/rust-analyzer/src/bin/args.rs b/crates/rust-analyzer/src/bin/args.rs
index 741a2a951..f16e35d86 100644
--- a/crates/rust-analyzer/src/bin/args.rs
+++ b/crates/rust-analyzer/src/bin/args.rs
@@ -8,7 +8,7 @@ use std::{env, fmt::Write, path::PathBuf}; | |||
8 | use anyhow::{bail, Result}; | 8 | use anyhow::{bail, Result}; |
9 | use pico_args::Arguments; | 9 | use pico_args::Arguments; |
10 | use ra_ssr::{SsrPattern, SsrRule}; | 10 | use ra_ssr::{SsrPattern, SsrRule}; |
11 | use rust_analyzer::cli::{BenchWhat, Position, Verbosity}; | 11 | use rust_analyzer::cli::{AnalysisStatsCmd, BenchCmd, BenchWhat, Position, Verbosity}; |
12 | use vfs::AbsPathBuf; | 12 | use vfs::AbsPathBuf; |
13 | 13 | ||
14 | pub(crate) struct Args { | 14 | pub(crate) struct Args { |
@@ -24,23 +24,8 @@ pub(crate) enum Command { | |||
24 | Highlight { | 24 | Highlight { |
25 | rainbow: bool, | 25 | rainbow: bool, |
26 | }, | 26 | }, |
27 | Stats { | 27 | AnalysisStats(AnalysisStatsCmd), |
28 | randomize: bool, | 28 | Bench(BenchCmd), |
29 | parallel: bool, | ||
30 | memory_usage: bool, | ||
31 | only: Option<String>, | ||
32 | with_deps: bool, | ||
33 | path: PathBuf, | ||
34 | load_output_dirs: bool, | ||
35 | with_proc_macro: bool, | ||
36 | }, | ||
37 | Bench { | ||
38 | memory_usage: bool, | ||
39 | path: PathBuf, | ||
40 | what: BenchWhat, | ||
41 | load_output_dirs: bool, | ||
42 | with_proc_macro: bool, | ||
43 | }, | ||
44 | Diagnostics { | 29 | Diagnostics { |
45 | path: PathBuf, | 30 | path: PathBuf, |
46 | load_output_dirs: bool, | 31 | load_output_dirs: bool, |
@@ -199,7 +184,7 @@ ARGS: | |||
199 | trailing.pop().unwrap().into() | 184 | trailing.pop().unwrap().into() |
200 | }; | 185 | }; |
201 | 186 | ||
202 | Command::Stats { | 187 | Command::AnalysisStats(AnalysisStatsCmd { |
203 | randomize, | 188 | randomize, |
204 | parallel, | 189 | parallel, |
205 | memory_usage, | 190 | memory_usage, |
@@ -208,7 +193,7 @@ ARGS: | |||
208 | path, | 193 | path, |
209 | load_output_dirs, | 194 | load_output_dirs, |
210 | with_proc_macro, | 195 | with_proc_macro, |
211 | } | 196 | }) |
212 | } | 197 | } |
213 | "analysis-bench" => { | 198 | "analysis-bench" => { |
214 | if matches.contains(["-h", "--help"]) { | 199 | if matches.contains(["-h", "--help"]) { |
@@ -256,7 +241,13 @@ ARGS: | |||
256 | let memory_usage = matches.contains("--memory-usage"); | 241 | let memory_usage = matches.contains("--memory-usage"); |
257 | let load_output_dirs = matches.contains("--load-output-dirs"); | 242 | let load_output_dirs = matches.contains("--load-output-dirs"); |
258 | let with_proc_macro = matches.contains("--with-proc-macro"); | 243 | let with_proc_macro = matches.contains("--with-proc-macro"); |
259 | Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } | 244 | Command::Bench(BenchCmd { |
245 | memory_usage, | ||
246 | path, | ||
247 | what, | ||
248 | load_output_dirs, | ||
249 | with_proc_macro, | ||
250 | }) | ||
260 | } | 251 | } |
261 | "diagnostics" => { | 252 | "diagnostics" => { |
262 | if matches.contains(["-h", "--help"]) { | 253 | if matches.contains(["-h", "--help"]) { |
diff --git a/crates/rust-analyzer/src/bin/main.rs b/crates/rust-analyzer/src/bin/main.rs
index a473c9165..ff8234495 100644
--- a/crates/rust-analyzer/src/bin/main.rs
+++ b/crates/rust-analyzer/src/bin/main.rs
@@ -33,36 +33,8 @@ fn main() -> Result<()> { | |||
33 | args::Command::Parse { no_dump } => cli::parse(no_dump)?, | 33 | args::Command::Parse { no_dump } => cli::parse(no_dump)?, |
34 | args::Command::Symbols => cli::symbols()?, | 34 | args::Command::Symbols => cli::symbols()?, |
35 | args::Command::Highlight { rainbow } => cli::highlight(rainbow)?, | 35 | args::Command::Highlight { rainbow } => cli::highlight(rainbow)?, |
36 | args::Command::Stats { | 36 | args::Command::AnalysisStats(cmd) => cmd.run(args.verbosity)?, |
37 | randomize, | 37 | args::Command::Bench(cmd) => cmd.run(args.verbosity)?, |
38 | parallel, | ||
39 | memory_usage, | ||
40 | only, | ||
41 | with_deps, | ||
42 | path, | ||
43 | load_output_dirs, | ||
44 | with_proc_macro, | ||
45 | } => cli::analysis_stats( | ||
46 | args.verbosity, | ||
47 | memory_usage, | ||
48 | path.as_ref(), | ||
49 | only.as_ref().map(String::as_ref), | ||
50 | with_deps, | ||
51 | randomize, | ||
52 | parallel, | ||
53 | load_output_dirs, | ||
54 | with_proc_macro, | ||
55 | )?, | ||
56 | args::Command::Bench { memory_usage, path, what, load_output_dirs, with_proc_macro } => { | ||
57 | cli::analysis_bench( | ||
58 | args.verbosity, | ||
59 | path.as_ref(), | ||
60 | what, | ||
61 | memory_usage, | ||
62 | load_output_dirs, | ||
63 | with_proc_macro, | ||
64 | )? | ||
65 | } | ||
66 | args::Command::Diagnostics { path, load_output_dirs, with_proc_macro, all } => { | 38 | args::Command::Diagnostics { path, load_output_dirs, with_proc_macro, all } => { |
67 | cli::diagnostics(path.as_ref(), load_output_dirs, with_proc_macro, all)? | 39 | cli::diagnostics(path.as_ref(), load_output_dirs, with_proc_macro, all)? |
68 | } | 40 | } |
diff --git a/crates/rust-analyzer/src/cli.rs b/crates/rust-analyzer/src/cli.rs
index a9b9c8923..1034d11bd 100644
--- a/crates/rust-analyzer/src/cli.rs
+++ b/crates/rust-analyzer/src/cli.rs
@@ -14,8 +14,8 @@ use ra_ide::Analysis; | |||
14 | use ra_prof::profile; | 14 | use ra_prof::profile; |
15 | use ra_syntax::{AstNode, SourceFile}; | 15 | use ra_syntax::{AstNode, SourceFile}; |
16 | 16 | ||
17 | pub use analysis_bench::{analysis_bench, BenchWhat, Position}; | 17 | pub use analysis_bench::{BenchCmd, BenchWhat, Position}; |
18 | pub use analysis_stats::analysis_stats; | 18 | pub use analysis_stats::AnalysisStatsCmd; |
19 | pub use diagnostics::diagnostics; | 19 | pub use diagnostics::diagnostics; |
20 | pub use load_cargo::load_cargo; | 20 | pub use load_cargo::load_cargo; |
21 | pub use ssr::{apply_ssr_rules, search_for_patterns}; | 21 | pub use ssr::{apply_ssr_rules, search_for_patterns}; |
diff --git a/crates/rust-analyzer/src/cli/analysis_bench.rs b/crates/rust-analyzer/src/cli/analysis_bench.rs
index 076184ad6..c54ee5f4d 100644
--- a/crates/rust-analyzer/src/cli/analysis_bench.rs
+++ b/crates/rust-analyzer/src/cli/analysis_bench.rs
@@ -1,8 +1,8 @@ | |||
1 | //! Benchmark operations like highlighting or goto definition. | 1 | //! Benchmark operations like highlighting or goto definition. |
2 | 2 | ||
3 | use std::{env, path::Path, str::FromStr, sync::Arc, time::Instant}; | 3 | use std::{env, path::PathBuf, str::FromStr, sync::Arc, time::Instant}; |
4 | 4 | ||
5 | use anyhow::{format_err, Result}; | 5 | use anyhow::{bail, format_err, Result}; |
6 | use ra_db::{ | 6 | use ra_db::{ |
7 | salsa::{Database, Durability}, | 7 | salsa::{Database, Durability}, |
8 | FileId, | 8 | FileId, |
@@ -15,6 +15,14 @@ use crate::{ | |||
15 | print_memory_usage, | 15 | print_memory_usage, |
16 | }; | 16 | }; |
17 | 17 | ||
18 | pub struct BenchCmd { | ||
19 | pub path: PathBuf, | ||
20 | pub what: BenchWhat, | ||
21 | pub memory_usage: bool, | ||
22 | pub load_output_dirs: bool, | ||
23 | pub with_proc_macro: bool, | ||
24 | } | ||
25 | |||
18 | pub enum BenchWhat { | 26 | pub enum BenchWhat { |
19 | Highlight { path: AbsPathBuf }, | 27 | Highlight { path: AbsPathBuf }, |
20 | Complete(Position), | 28 | Complete(Position), |
@@ -30,85 +38,80 @@ pub struct Position { | |||
30 | impl FromStr for Position { | 38 | impl FromStr for Position { |
31 | type Err = anyhow::Error; | 39 | type Err = anyhow::Error; |
32 | fn from_str(s: &str) -> Result<Self> { | 40 | fn from_str(s: &str) -> Result<Self> { |
33 | let (path_line, column) = rsplit_at_char(s, ':')?; | 41 | let mut split = s.rsplitn(3, ':'); |
34 | let (path, line) = rsplit_at_char(path_line, ':')?; | 42 | match (split.next(), split.next(), split.next()) { |
35 | let path = env::current_dir().unwrap().join(path); | 43 | (Some(column), Some(line), Some(path)) => { |
36 | let path = AbsPathBuf::assert(path); | 44 | let path = env::current_dir().unwrap().join(path); |
37 | Ok(Position { path, line: line.parse()?, column: column.parse()? }) | 45 | let path = AbsPathBuf::assert(path); |
46 | Ok(Position { path, line: line.parse()?, column: column.parse()? }) | ||
47 | } | ||
48 | _ => bail!("position should be in file:line:column format: {:?}", s), | ||
49 | } | ||
38 | } | 50 | } |
39 | } | 51 | } |
40 | 52 | ||
41 | fn rsplit_at_char(s: &str, c: char) -> Result<(&str, &str)> { | 53 | impl BenchCmd { |
42 | let idx = s.rfind(c).ok_or_else(|| format_err!("no `{}` in {}", c, s))?; | 54 | pub fn run(self, verbosity: Verbosity) -> Result<()> { |
43 | Ok((&s[..idx], &s[idx + 1..])) | 55 | ra_prof::init(); |
44 | } | ||
45 | 56 | ||
46 | pub fn analysis_bench( | 57 | let start = Instant::now(); |
47 | verbosity: Verbosity, | 58 | eprint!("loading: "); |
48 | path: &Path, | 59 | let (mut host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; |
49 | what: BenchWhat, | 60 | eprintln!("{:?}\n", start.elapsed()); |
50 | memory_usage: bool, | 61 | |
51 | load_output_dirs: bool, | 62 | let file_id = { |
52 | with_proc_macro: bool, | 63 | let path = match &self.what { |
53 | ) -> Result<()> { | 64 | BenchWhat::Highlight { path } => path, |
54 | ra_prof::init(); | 65 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, |
55 | 66 | }; | |
56 | let start = Instant::now(); | 67 | let path = path.clone().into(); |
57 | eprint!("loading: "); | 68 | vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? |
58 | let (mut host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; | ||
59 | eprintln!("{:?}\n", start.elapsed()); | ||
60 | |||
61 | let file_id = { | ||
62 | let path = match &what { | ||
63 | BenchWhat::Highlight { path } => path, | ||
64 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => &pos.path, | ||
65 | }; | 69 | }; |
66 | let path = path.clone().into(); | ||
67 | vfs.file_id(&path).ok_or_else(|| format_err!("Can't find {}", path))? | ||
68 | }; | ||
69 | |||
70 | match &what { | ||
71 | BenchWhat::Highlight { .. } => { | ||
72 | let res = do_work(&mut host, file_id, |analysis| { | ||
73 | analysis.diagnostics(file_id, true).unwrap(); | ||
74 | analysis.highlight_as_html(file_id, false).unwrap() | ||
75 | }); | ||
76 | if verbosity.is_verbose() { | ||
77 | println!("\n{}", res); | ||
78 | } | ||
79 | } | ||
80 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { | ||
81 | let is_completion = matches!(what, BenchWhat::Complete(..)); | ||
82 | 70 | ||
83 | let offset = host | 71 | match &self.what { |
84 | .analysis() | 72 | BenchWhat::Highlight { .. } => { |
85 | .file_line_index(file_id)? | ||
86 | .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); | ||
87 | let file_position = FilePosition { file_id, offset }; | ||
88 | |||
89 | if is_completion { | ||
90 | let options = CompletionConfig::default(); | ||
91 | let res = do_work(&mut host, file_id, |analysis| { | 73 | let res = do_work(&mut host, file_id, |analysis| { |
92 | analysis.completions(&options, file_position) | 74 | analysis.diagnostics(file_id, true).unwrap(); |
75 | analysis.highlight_as_html(file_id, false).unwrap() | ||
93 | }); | 76 | }); |
94 | if verbosity.is_verbose() { | 77 | if verbosity.is_verbose() { |
95 | println!("\n{:#?}", res); | 78 | println!("\n{}", res); |
96 | } | 79 | } |
97 | } else { | 80 | } |
98 | let res = | 81 | BenchWhat::Complete(pos) | BenchWhat::GotoDef(pos) => { |
99 | do_work(&mut host, file_id, |analysis| analysis.goto_definition(file_position)); | 82 | let is_completion = matches!(self.what, BenchWhat::Complete(..)); |
100 | if verbosity.is_verbose() { | 83 | |
101 | println!("\n{:#?}", res); | 84 | let offset = host |
85 | .analysis() | ||
86 | .file_line_index(file_id)? | ||
87 | .offset(LineCol { line: pos.line - 1, col_utf16: pos.column }); | ||
88 | let file_position = FilePosition { file_id, offset }; | ||
89 | |||
90 | if is_completion { | ||
91 | let options = CompletionConfig::default(); | ||
92 | let res = do_work(&mut host, file_id, |analysis| { | ||
93 | analysis.completions(&options, file_position) | ||
94 | }); | ||
95 | if verbosity.is_verbose() { | ||
96 | println!("\n{:#?}", res); | ||
97 | } | ||
98 | } else { | ||
99 | let res = do_work(&mut host, file_id, |analysis| { | ||
100 | analysis.goto_definition(file_position) | ||
101 | }); | ||
102 | if verbosity.is_verbose() { | ||
103 | println!("\n{:#?}", res); | ||
104 | } | ||
102 | } | 105 | } |
103 | } | 106 | } |
104 | } | 107 | } |
105 | } | ||
106 | 108 | ||
107 | if memory_usage { | 109 | if self.memory_usage { |
108 | print_memory_usage(host, vfs); | 110 | print_memory_usage(host, vfs); |
109 | } | 111 | } |
110 | 112 | ||
111 | Ok(()) | 113 | Ok(()) |
114 | } | ||
112 | } | 115 | } |
113 | 116 | ||
114 | fn do_work<F: Fn(&Analysis) -> T, T>(host: &mut AnalysisHost, file_id: FileId, work: F) -> T { | 117 | fn do_work<F: Fn(&Analysis) -> T, T>(host: &mut AnalysisHost, file_id: FileId, work: F) -> T { |
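Note on the rewritten `Position::from_str` above: `rsplitn(3, ':')` walks the string from the right, so the first two `next()` calls yield the column and line, and the third yields whatever path text remains, even if that path itself contains `:`. A minimal sketch of the same parsing shape outside the rust-analyzer types (the `parse_position` helper and the sample path are illustrative, not part of the patch):

    // Mirrors the rsplitn-based parsing in Position::from_str,
    // minus the AbsPathBuf/anyhow machinery.
    fn parse_position(s: &str) -> Option<(String, u32, u32)> {
        let mut split = s.rsplitn(3, ':');
        match (split.next(), split.next(), split.next()) {
            // Right-to-left: column first, then line, then the remaining path.
            (Some(column), Some(line), Some(path)) => {
                Some((path.to_string(), line.parse().ok()?, column.parse().ok()?))
            }
            _ => None,
        }
    }

    fn main() {
        assert_eq!(
            parse_position("src/main.rs:10:5"),
            Some(("src/main.rs".to_string(), 10, 5))
        );
        // Too few segments produce None here; the real command instead bails
        // with "position should be in file:line:column format".
        assert_eq!(parse_position("src/main.rs:10"), None);
    }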
diff --git a/crates/rust-analyzer/src/cli/analysis_stats.rs b/crates/rust-analyzer/src/cli/analysis_stats.rs
index a270eb481..721d41a58 100644
--- a/crates/rust-analyzer/src/cli/analysis_stats.rs
+++ b/crates/rust-analyzer/src/cli/analysis_stats.rs
@@ -2,7 +2,7 @@ | |||
2 | //! errors. | 2 | //! errors. |
3 | 3 | ||
4 | use std::{ | 4 | use std::{ |
5 | path::Path, | 5 | path::PathBuf, |
6 | time::{SystemTime, UNIX_EPOCH}, | 6 | time::{SystemTime, UNIX_EPOCH}, |
7 | }; | 7 | }; |
8 | 8 | ||
@@ -39,273 +39,280 @@ impl<DB: ParallelDatabase> Clone for Snap<salsa::Snapshot<DB>> { | |||
39 | } | 39 | } |
40 | } | 40 | } |
41 | 41 | ||
42 | pub fn analysis_stats( | 42 | pub struct AnalysisStatsCmd { |
43 | verbosity: Verbosity, | 43 | pub randomize: bool, |
44 | memory_usage: bool, | 44 | pub parallel: bool, |
45 | path: &Path, | 45 | pub memory_usage: bool, |
46 | only: Option<&str>, | 46 | pub only: Option<String>, |
47 | with_deps: bool, | 47 | pub with_deps: bool, |
48 | randomize: bool, | 48 | pub path: PathBuf, |
49 | parallel: bool, | 49 | pub load_output_dirs: bool, |
50 | load_output_dirs: bool, | 50 | pub with_proc_macro: bool, |
51 | with_proc_macro: bool, | 51 | } |
52 | ) -> Result<()> { | ||
53 | let mut rng = { | ||
54 | let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; | ||
55 | Rand32::new(seed) | ||
56 | }; | ||
57 | 52 | ||
58 | let mut db_load_sw = StopWatch::start().memory(memory_usage); | 53 | impl AnalysisStatsCmd { |
59 | let (host, vfs) = load_cargo(path, load_output_dirs, with_proc_macro)?; | 54 | pub fn run(self, verbosity: Verbosity) -> Result<()> { |
60 | let db = host.raw_database(); | 55 | let mut rng = { |
61 | eprintln!("Database loaded {}", db_load_sw.elapsed()); | 56 | let seed = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; |
57 | Rand32::new(seed) | ||
58 | }; | ||
62 | 59 | ||
63 | let mut analysis_sw = StopWatch::start().memory(memory_usage); | 60 | let mut db_load_sw = self.stop_watch(); |
64 | let mut num_crates = 0; | 61 | let (host, vfs) = load_cargo(&self.path, self.load_output_dirs, self.with_proc_macro)?; |
65 | let mut visited_modules = FxHashSet::default(); | 62 | let db = host.raw_database(); |
66 | let mut visit_queue = Vec::new(); | 63 | eprintln!("Database loaded {}", db_load_sw.elapsed()); |
67 | 64 | ||
68 | let mut krates = Crate::all(db); | 65 | let mut analysis_sw = self.stop_watch(); |
69 | if randomize { | 66 | let mut num_crates = 0; |
70 | shuffle(&mut rng, &mut krates); | 67 | let mut visited_modules = FxHashSet::default(); |
71 | } | 68 | let mut visit_queue = Vec::new(); |
72 | for krate in krates { | ||
73 | let module = krate.root_module(db).expect("crate without root module"); | ||
74 | let file_id = module.definition_source(db).file_id; | ||
75 | let file_id = file_id.original_file(db); | ||
76 | let source_root = db.file_source_root(file_id); | ||
77 | let source_root = db.source_root(source_root); | ||
78 | if !source_root.is_library || with_deps { | ||
79 | num_crates += 1; | ||
80 | visit_queue.push(module); | ||
81 | } | ||
82 | } | ||
83 | 69 | ||
84 | if randomize { | 70 | let mut krates = Crate::all(db); |
85 | shuffle(&mut rng, &mut visit_queue); | 71 | if self.randomize { |
86 | } | 72 | shuffle(&mut rng, &mut krates); |
73 | } | ||
74 | for krate in krates { | ||
75 | let module = krate.root_module(db).expect("crate without root module"); | ||
76 | let file_id = module.definition_source(db).file_id; | ||
77 | let file_id = file_id.original_file(db); | ||
78 | let source_root = db.file_source_root(file_id); | ||
79 | let source_root = db.source_root(source_root); | ||
80 | if !source_root.is_library || self.with_deps { | ||
81 | num_crates += 1; | ||
82 | visit_queue.push(module); | ||
83 | } | ||
84 | } | ||
87 | 85 | ||
88 | eprintln!("Crates in this dir: {}", num_crates); | 86 | if self.randomize { |
89 | let mut num_decls = 0; | 87 | shuffle(&mut rng, &mut visit_queue); |
90 | let mut funcs = Vec::new(); | 88 | } |
91 | while let Some(module) = visit_queue.pop() { | ||
92 | if visited_modules.insert(module) { | ||
93 | visit_queue.extend(module.children(db)); | ||
94 | 89 | ||
95 | for decl in module.declarations(db) { | 90 | eprintln!("Crates in this dir: {}", num_crates); |
96 | num_decls += 1; | 91 | let mut num_decls = 0; |
97 | if let ModuleDef::Function(f) = decl { | 92 | let mut funcs = Vec::new(); |
98 | funcs.push(f); | 93 | while let Some(module) = visit_queue.pop() { |
99 | } | 94 | if visited_modules.insert(module) { |
100 | } | 95 | visit_queue.extend(module.children(db)); |
101 | 96 | ||
102 | for impl_def in module.impl_defs(db) { | 97 | for decl in module.declarations(db) { |
103 | for item in impl_def.items(db) { | ||
104 | num_decls += 1; | 98 | num_decls += 1; |
105 | if let AssocItem::Function(f) = item { | 99 | if let ModuleDef::Function(f) = decl { |
106 | funcs.push(f); | 100 | funcs.push(f); |
107 | } | 101 | } |
108 | } | 102 | } |
103 | |||
104 | for impl_def in module.impl_defs(db) { | ||
105 | for item in impl_def.items(db) { | ||
106 | num_decls += 1; | ||
107 | if let AssocItem::Function(f) = item { | ||
108 | funcs.push(f); | ||
109 | } | ||
110 | } | ||
111 | } | ||
109 | } | 112 | } |
110 | } | 113 | } |
111 | } | 114 | eprintln!("Total modules found: {}", visited_modules.len()); |
112 | eprintln!("Total modules found: {}", visited_modules.len()); | 115 | eprintln!("Total declarations: {}", num_decls); |
113 | eprintln!("Total declarations: {}", num_decls); | 116 | eprintln!("Total functions: {}", funcs.len()); |
114 | eprintln!("Total functions: {}", funcs.len()); | 117 | eprintln!("Item Collection: {}", analysis_sw.elapsed()); |
115 | eprintln!("Item Collection: {}", analysis_sw.elapsed()); | ||
116 | |||
117 | if randomize { | ||
118 | shuffle(&mut rng, &mut funcs); | ||
119 | } | ||
120 | 118 | ||
121 | let mut bar = match verbosity { | 119 | if self.randomize { |
122 | Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), | 120 | shuffle(&mut rng, &mut funcs); |
123 | _ if parallel => ProgressReport::hidden(), | 121 | } |
124 | _ => ProgressReport::new(funcs.len() as u64), | ||
125 | }; | ||
126 | 122 | ||
127 | if parallel { | 123 | let mut bar = match verbosity { |
128 | let mut inference_sw = StopWatch::start().memory(memory_usage); | 124 | Verbosity::Quiet | Verbosity::Spammy => ProgressReport::hidden(), |
129 | let snap = Snap(db.snapshot()); | 125 | _ if self.parallel => ProgressReport::hidden(), |
130 | funcs | 126 | _ => ProgressReport::new(funcs.len() as u64), |
131 | .par_iter() | 127 | }; |
132 | .map_with(snap, |snap, &f| { | ||
133 | let f_id = FunctionId::from(f); | ||
134 | snap.0.body(f_id.into()); | ||
135 | snap.0.infer(f_id.into()); | ||
136 | }) | ||
137 | .count(); | ||
138 | eprintln!("Parallel Inference: {}", inference_sw.elapsed()); | ||
139 | } | ||
140 | 128 | ||
141 | let mut inference_sw = StopWatch::start().memory(memory_usage); | 129 | if self.parallel { |
142 | bar.tick(); | 130 | let mut inference_sw = self.stop_watch(); |
143 | let mut num_exprs = 0; | 131 | let snap = Snap(db.snapshot()); |
144 | let mut num_exprs_unknown = 0; | 132 | funcs |
145 | let mut num_exprs_partially_unknown = 0; | 133 | .par_iter() |
146 | let mut num_type_mismatches = 0; | 134 | .map_with(snap, |snap, &f| { |
147 | for f in funcs { | 135 | let f_id = FunctionId::from(f); |
148 | let name = f.name(db); | 136 | snap.0.body(f_id.into()); |
149 | let full_name = f | 137 | snap.0.infer(f_id.into()); |
150 | .module(db) | 138 | }) |
151 | .path_to_root(db) | 139 | .count(); |
152 | .into_iter() | 140 | eprintln!("Parallel Inference: {}", inference_sw.elapsed()); |
153 | .rev() | ||
154 | .filter_map(|it| it.name(db)) | ||
155 | .chain(Some(f.name(db))) | ||
156 | .join("::"); | ||
157 | if let Some(only_name) = only { | ||
158 | if name.to_string() != only_name && full_name != only_name { | ||
159 | continue; | ||
160 | } | ||
161 | } | ||
162 | let mut msg = format!("processing: {}", full_name); | ||
163 | if verbosity.is_verbose() { | ||
164 | let src = f.source(db); | ||
165 | let original_file = src.file_id.original_file(db); | ||
166 | let path = vfs.file_path(original_file); | ||
167 | let syntax_range = src.value.syntax().text_range(); | ||
168 | format_to!(msg, " ({} {:?})", path, syntax_range); | ||
169 | } | 141 | } |
170 | if verbosity.is_spammy() { | 142 | |
171 | bar.println(msg.to_string()); | 143 | let mut inference_sw = self.stop_watch(); |
172 | } | 144 | bar.tick(); |
173 | bar.set_message(&msg); | 145 | let mut num_exprs = 0; |
174 | let f_id = FunctionId::from(f); | 146 | let mut num_exprs_unknown = 0; |
175 | let body = db.body(f_id.into()); | 147 | let mut num_exprs_partially_unknown = 0; |
176 | let inference_result = db.infer(f_id.into()); | 148 | let mut num_type_mismatches = 0; |
177 | let (previous_exprs, previous_unknown, previous_partially_unknown) = | 149 | for f in funcs { |
178 | (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); | 150 | let name = f.name(db); |
179 | for (expr_id, _) in body.exprs.iter() { | 151 | let full_name = f |
180 | let ty = &inference_result[expr_id]; | 152 | .module(db) |
181 | num_exprs += 1; | 153 | .path_to_root(db) |
182 | if let Ty::Unknown = ty { | 154 | .into_iter() |
183 | num_exprs_unknown += 1; | 155 | .rev() |
184 | } else { | 156 | .filter_map(|it| it.name(db)) |
185 | let mut is_partially_unknown = false; | 157 | .chain(Some(f.name(db))) |
186 | ty.walk(&mut |ty| { | 158 | .join("::"); |
187 | if let Ty::Unknown = ty { | 159 | if let Some(only_name) = self.only.as_deref() { |
188 | is_partially_unknown = true; | 160 | if name.to_string() != only_name && full_name != only_name { |
189 | } | 161 | continue; |
190 | }); | ||
191 | if is_partially_unknown { | ||
192 | num_exprs_partially_unknown += 1; | ||
193 | } | 162 | } |
194 | } | 163 | } |
195 | if only.is_some() && verbosity.is_spammy() { | 164 | let mut msg = format!("processing: {}", full_name); |
196 | // in super-verbose mode for just one function, we print every single expression | 165 | if verbosity.is_verbose() { |
197 | let (_, sm) = db.body_with_source_map(f_id.into()); | 166 | let src = f.source(db); |
198 | let src = sm.expr_syntax(expr_id); | 167 | let original_file = src.file_id.original_file(db); |
199 | if let Ok(src) = src { | 168 | let path = vfs.file_path(original_file); |
200 | let node = { | 169 | let syntax_range = src.value.syntax().text_range(); |
201 | let root = db.parse_or_expand(src.file_id).unwrap(); | 170 | format_to!(msg, " ({} {:?})", path, syntax_range); |
202 | src.value.to_node(&root) | 171 | } |
203 | }; | 172 | if verbosity.is_spammy() { |
204 | let original_file = src.file_id.original_file(db); | 173 | bar.println(msg.to_string()); |
205 | let line_index = host.analysis().file_line_index(original_file).unwrap(); | 174 | } |
206 | let text_range = node.syntax().text_range(); | 175 | bar.set_message(&msg); |
207 | let (start, end) = ( | 176 | let f_id = FunctionId::from(f); |
208 | line_index.line_col(text_range.start()), | 177 | let body = db.body(f_id.into()); |
209 | line_index.line_col(text_range.end()), | 178 | let inference_result = db.infer(f_id.into()); |
210 | ); | 179 | let (previous_exprs, previous_unknown, previous_partially_unknown) = |
211 | bar.println(format!( | 180 | (num_exprs, num_exprs_unknown, num_exprs_partially_unknown); |
212 | "{}:{}-{}:{}: {}", | 181 | for (expr_id, _) in body.exprs.iter() { |
213 | start.line + 1, | 182 | let ty = &inference_result[expr_id]; |
214 | start.col_utf16, | 183 | num_exprs += 1; |
215 | end.line + 1, | 184 | if let Ty::Unknown = ty { |
216 | end.col_utf16, | 185 | num_exprs_unknown += 1; |
217 | ty.display(db) | ||
218 | )); | ||
219 | } else { | 186 | } else { |
220 | bar.println(format!("unknown location: {}", ty.display(db))); | 187 | let mut is_partially_unknown = false; |
188 | ty.walk(&mut |ty| { | ||
189 | if let Ty::Unknown = ty { | ||
190 | is_partially_unknown = true; | ||
191 | } | ||
192 | }); | ||
193 | if is_partially_unknown { | ||
194 | num_exprs_partially_unknown += 1; | ||
195 | } | ||
221 | } | 196 | } |
222 | } | 197 | if self.only.is_some() && verbosity.is_spammy() { |
223 | if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { | 198 | // in super-verbose mode for just one function, we print every single expression |
224 | num_type_mismatches += 1; | ||
225 | if verbosity.is_verbose() { | ||
226 | let (_, sm) = db.body_with_source_map(f_id.into()); | 199 | let (_, sm) = db.body_with_source_map(f_id.into()); |
227 | let src = sm.expr_syntax(expr_id); | 200 | let src = sm.expr_syntax(expr_id); |
228 | if let Ok(src) = src { | 201 | if let Ok(src) = src { |
229 | // FIXME: it might be nice to have a function (on Analysis?) that goes from Source<T> -> (LineCol, LineCol) directly | 202 | let node = { |
230 | // But also, we should just turn the type mismatches into diagnostics and provide these | 203 | let root = db.parse_or_expand(src.file_id).unwrap(); |
231 | let root = db.parse_or_expand(src.file_id).unwrap(); | 204 | src.value.to_node(&root) |
232 | let node = src.map(|e| e.to_node(&root).syntax().clone()); | 205 | }; |
233 | let original_range = original_range(db, node.as_ref()); | 206 | let original_file = src.file_id.original_file(db); |
234 | let path = vfs.file_path(original_range.file_id); | 207 | let line_index = host.analysis().file_line_index(original_file).unwrap(); |
235 | let line_index = | 208 | let text_range = node.syntax().text_range(); |
236 | host.analysis().file_line_index(original_range.file_id).unwrap(); | ||
237 | let text_range = original_range.range; | ||
238 | let (start, end) = ( | 209 | let (start, end) = ( |
239 | line_index.line_col(text_range.start()), | 210 | line_index.line_col(text_range.start()), |
240 | line_index.line_col(text_range.end()), | 211 | line_index.line_col(text_range.end()), |
241 | ); | 212 | ); |
242 | bar.println(format!( | 213 | bar.println(format!( |
243 | "{} {}:{}-{}:{}: Expected {}, got {}", | 214 | "{}:{}-{}:{}: {}", |
244 | path, | ||
245 | start.line + 1, | 215 | start.line + 1, |
246 | start.col_utf16, | 216 | start.col_utf16, |
247 | end.line + 1, | 217 | end.line + 1, |
248 | end.col_utf16, | 218 | end.col_utf16, |
249 | mismatch.expected.display(db), | 219 | ty.display(db) |
250 | mismatch.actual.display(db) | ||
251 | )); | 220 | )); |
252 | } else { | 221 | } else { |
253 | bar.println(format!( | 222 | bar.println(format!("unknown location: {}", ty.display(db))); |
254 | "{}: Expected {}, got {}", | 223 | } |
255 | name, | 224 | } |
256 | mismatch.expected.display(db), | 225 | if let Some(mismatch) = inference_result.type_mismatch_for_expr(expr_id) { |
257 | mismatch.actual.display(db) | 226 | num_type_mismatches += 1; |
258 | )); | 227 | if verbosity.is_verbose() { |
228 | let (_, sm) = db.body_with_source_map(f_id.into()); | ||
229 | let src = sm.expr_syntax(expr_id); | ||
230 | if let Ok(src) = src { | ||
231 | // FIXME: it might be nice to have a function (on Analysis?) that goes from Source<T> -> (LineCol, LineCol) directly | ||
232 | // But also, we should just turn the type mismatches into diagnostics and provide these | ||
233 | let root = db.parse_or_expand(src.file_id).unwrap(); | ||
234 | let node = src.map(|e| e.to_node(&root).syntax().clone()); | ||
235 | let original_range = original_range(db, node.as_ref()); | ||
236 | let path = vfs.file_path(original_range.file_id); | ||
237 | let line_index = | ||
238 | host.analysis().file_line_index(original_range.file_id).unwrap(); | ||
239 | let text_range = original_range.range; | ||
240 | let (start, end) = ( | ||
241 | line_index.line_col(text_range.start()), | ||
242 | line_index.line_col(text_range.end()), | ||
243 | ); | ||
244 | bar.println(format!( | ||
245 | "{} {}:{}-{}:{}: Expected {}, got {}", | ||
246 | path, | ||
247 | start.line + 1, | ||
248 | start.col_utf16, | ||
249 | end.line + 1, | ||
250 | end.col_utf16, | ||
251 | mismatch.expected.display(db), | ||
252 | mismatch.actual.display(db) | ||
253 | )); | ||
254 | } else { | ||
255 | bar.println(format!( | ||
256 | "{}: Expected {}, got {}", | ||
257 | name, | ||
258 | mismatch.expected.display(db), | ||
259 | mismatch.actual.display(db) | ||
260 | )); | ||
261 | } | ||
259 | } | 262 | } |
260 | } | 263 | } |
261 | } | 264 | } |
265 | if verbosity.is_spammy() { | ||
266 | bar.println(format!( | ||
267 | "In {}: {} exprs, {} unknown, {} partial", | ||
268 | full_name, | ||
269 | num_exprs - previous_exprs, | ||
270 | num_exprs_unknown - previous_unknown, | ||
271 | num_exprs_partially_unknown - previous_partially_unknown | ||
272 | )); | ||
273 | } | ||
274 | bar.inc(1); | ||
262 | } | 275 | } |
263 | if verbosity.is_spammy() { | 276 | bar.finish_and_clear(); |
264 | bar.println(format!( | 277 | eprintln!("Total expressions: {}", num_exprs); |
265 | "In {}: {} exprs, {} unknown, {} partial", | 278 | eprintln!( |
266 | full_name, | 279 | "Expressions of unknown type: {} ({}%)", |
267 | num_exprs - previous_exprs, | 280 | num_exprs_unknown, |
268 | num_exprs_unknown - previous_unknown, | 281 | if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } |
269 | num_exprs_partially_unknown - previous_partially_unknown | 282 | ); |
270 | )); | 283 | report_metric("unknown type", num_exprs_unknown, "#"); |
271 | } | ||
272 | bar.inc(1); | ||
273 | } | ||
274 | bar.finish_and_clear(); | ||
275 | eprintln!("Total expressions: {}", num_exprs); | ||
276 | eprintln!( | ||
277 | "Expressions of unknown type: {} ({}%)", | ||
278 | num_exprs_unknown, | ||
279 | if num_exprs > 0 { num_exprs_unknown * 100 / num_exprs } else { 100 } | ||
280 | ); | ||
281 | report_metric("unknown type", num_exprs_unknown, "#"); | ||
282 | 284 | ||
283 | eprintln!( | 285 | eprintln!( |
284 | "Expressions of partially unknown type: {} ({}%)", | 286 | "Expressions of partially unknown type: {} ({}%)", |
285 | num_exprs_partially_unknown, | 287 | num_exprs_partially_unknown, |
286 | if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } | 288 | if num_exprs > 0 { num_exprs_partially_unknown * 100 / num_exprs } else { 100 } |
287 | ); | 289 | ); |
288 | 290 | ||
289 | eprintln!("Type mismatches: {}", num_type_mismatches); | 291 | eprintln!("Type mismatches: {}", num_type_mismatches); |
290 | report_metric("type mismatches", num_type_mismatches, "#"); | 292 | report_metric("type mismatches", num_type_mismatches, "#"); |
291 | 293 | ||
292 | eprintln!("Inference: {}", inference_sw.elapsed()); | 294 | eprintln!("Inference: {}", inference_sw.elapsed()); |
293 | 295 | ||
294 | let total_span = analysis_sw.elapsed(); | 296 | let total_span = analysis_sw.elapsed(); |
295 | eprintln!("Total: {}", total_span); | 297 | eprintln!("Total: {}", total_span); |
296 | report_metric("total time", total_span.time.as_millis() as u64, "ms"); | 298 | report_metric("total time", total_span.time.as_millis() as u64, "ms"); |
297 | if let Some(instructions) = total_span.instructions { | 299 | if let Some(instructions) = total_span.instructions { |
298 | report_metric("total instructions", instructions, "#instr"); | 300 | report_metric("total instructions", instructions, "#instr"); |
299 | } | 301 | } |
300 | if let Some(memory) = total_span.memory { | 302 | if let Some(memory) = total_span.memory { |
301 | report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); | 303 | report_metric("total memory", memory.allocated.megabytes() as u64, "MB"); |
302 | } | 304 | } |
303 | 305 | ||
304 | if memory_usage { | 306 | if self.memory_usage { |
305 | print_memory_usage(host, vfs); | 307 | print_memory_usage(host, vfs); |
308 | } | ||
309 | |||
310 | Ok(()) | ||
306 | } | 311 | } |
307 | 312 | ||
308 | Ok(()) | 313 | fn stop_watch(&self) -> StopWatch { |
314 | StopWatch::start().memory(self.memory_usage) | ||
315 | } | ||
309 | } | 316 | } |
310 | 317 | ||
311 | fn shuffle<T>(rng: &mut Rand32, slice: &mut [T]) { | 318 | fn shuffle<T>(rng: &mut Rand32, slice: &mut [T]) { |
diff --git a/crates/rust-analyzer/src/handlers.rs b/crates/rust-analyzer/src/handlers.rs
index 62ed6880b..e73b3a211 100644
--- a/crates/rust-analyzer/src/handlers.rs
+++ b/crates/rust-analyzer/src/handlers.rs
@@ -26,7 +26,7 @@ use ra_project_model::TargetKind; | |||
26 | use ra_syntax::{algo, ast, AstNode, SyntaxKind, TextRange, TextSize}; | 26 | use ra_syntax::{algo, ast, AstNode, SyntaxKind, TextRange, TextSize}; |
27 | use serde::{Deserialize, Serialize}; | 27 | use serde::{Deserialize, Serialize}; |
28 | use serde_json::to_value; | 28 | use serde_json::to_value; |
29 | use stdx::{format_to, split_delim}; | 29 | use stdx::{format_to, split_once}; |
30 | 30 | ||
31 | use crate::{ | 31 | use crate::{ |
32 | cargo_target_spec::CargoTargetSpec, | 32 | cargo_target_spec::CargoTargetSpec, |
@@ -865,7 +865,7 @@ pub(crate) fn handle_resolve_code_action( | |||
865 | .map(|it| it.into_iter().filter_map(from_proto::assist_kind).collect()); | 865 | .map(|it| it.into_iter().filter_map(from_proto::assist_kind).collect()); |
866 | 866 | ||
867 | let assists = snap.analysis.resolved_assists(&snap.config.assist, frange)?; | 867 | let assists = snap.analysis.resolved_assists(&snap.config.assist, frange)?; |
868 | let (id_string, index) = split_delim(¶ms.id, ':').unwrap(); | 868 | let (id_string, index) = split_once(¶ms.id, ':').unwrap(); |
869 | let index = index.parse::<usize>().unwrap(); | 869 | let index = index.parse::<usize>().unwrap(); |
870 | let assist = &assists[index]; | 870 | let assist = &assists[index]; |
871 | assert!(assist.assist.id.0 == id_string); | 871 | assert!(assist.assist.id.0 == id_string); |
diff --git a/crates/stdx/src/lib.rs b/crates/stdx/src/lib.rs
index ea0e6b949..b65875c96 100644
--- a/crates/stdx/src/lib.rs
+++ b/crates/stdx/src/lib.rs
@@ -109,9 +109,18 @@ pub fn replace(buf: &mut String, from: char, to: &str) { | |||
109 | *buf = buf.replace(from, to) | 109 | *buf = buf.replace(from, to) |
110 | } | 110 | } |
111 | 111 | ||
112 | pub fn split_delim(haystack: &str, delim: char) -> Option<(&str, &str)> { | 112 | // https://github.com/rust-lang/rust/issues/74773 |
113 | let idx = haystack.find(delim)?; | 113 | pub fn split_once(haystack: &str, delim: char) -> Option<(&str, &str)> { |
114 | Some((&haystack[..idx], &haystack[idx + delim.len_utf8()..])) | 114 | let mut split = haystack.splitn(2, delim); |
115 | let prefix = split.next()?; | ||
116 | let suffix = split.next()?; | ||
117 | Some((prefix, suffix)) | ||
118 | } | ||
119 | pub fn rsplit_once(haystack: &str, delim: char) -> Option<(&str, &str)> { | ||
120 | let mut split = haystack.rsplitn(2, delim); | ||
121 | let suffix = split.next()?; | ||
122 | let prefix = split.next()?; | ||
123 | Some((prefix, suffix)) | ||
115 | } | 124 | } |
116 | 125 | ||
117 | pub fn trim_indent(mut text: &str) -> String { | 126 | pub fn trim_indent(mut text: &str) -> String { |
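The stdx change above renames `split_delim` to `split_once`, matching the standard-library API proposal tracked in rust-lang/rust#74773 (the issue linked in the code), and adds an `rsplit_once` counterpart. The two differ only in which delimiter occurrence they cut at; a small illustrative check, assuming the `stdx` crate is in scope (the assertions are examples, not part of the patch):

    fn main() {
        // split_once cuts at the first delimiter...
        assert_eq!(
            stdx::split_once("CARGO_PKG_NAME=foo=bar", '='),
            Some(("CARGO_PKG_NAME", "foo=bar"))
        );
        // ...rsplit_once at the last...
        assert_eq!(
            stdx::rsplit_once("CARGO_PKG_NAME=foo=bar", '='),
            Some(("CARGO_PKG_NAME=foo", "bar"))
        );
        // ...and both return None when the delimiter is absent.
        assert_eq!(stdx::split_once("no-delimiter", '='), None);
    }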
diff --git a/crates/test_utils/src/fixture.rs b/crates/test_utils/src/fixture.rs
index ed764046b..e40b61a94 100644
--- a/crates/test_utils/src/fixture.rs
+++ b/crates/test_utils/src/fixture.rs
@@ -2,7 +2,7 @@ | |||
2 | //! rust-analyzer database from a single string. | 2 | //! rust-analyzer database from a single string. |
3 | 3 | ||
4 | use rustc_hash::FxHashMap; | 4 | use rustc_hash::FxHashMap; |
5 | use stdx::{lines_with_ends, split_delim, trim_indent}; | 5 | use stdx::{lines_with_ends, split_once, trim_indent}; |
6 | 6 | ||
7 | #[derive(Debug, Eq, PartialEq)] | 7 | #[derive(Debug, Eq, PartialEq)] |
8 | pub struct Fixture { | 8 | pub struct Fixture { |
@@ -71,14 +71,14 @@ impl Fixture { | |||
71 | let mut cfg_key_values = Vec::new(); | 71 | let mut cfg_key_values = Vec::new(); |
72 | let mut env = FxHashMap::default(); | 72 | let mut env = FxHashMap::default(); |
73 | for component in components[1..].iter() { | 73 | for component in components[1..].iter() { |
74 | let (key, value) = split_delim(component, ':').unwrap(); | 74 | let (key, value) = split_once(component, ':').unwrap(); |
75 | match key { | 75 | match key { |
76 | "crate" => krate = Some(value.to_string()), | 76 | "crate" => krate = Some(value.to_string()), |
77 | "deps" => deps = value.split(',').map(|it| it.to_string()).collect(), | 77 | "deps" => deps = value.split(',').map(|it| it.to_string()).collect(), |
78 | "edition" => edition = Some(value.to_string()), | 78 | "edition" => edition = Some(value.to_string()), |
79 | "cfg" => { | 79 | "cfg" => { |
80 | for entry in value.split(',') { | 80 | for entry in value.split(',') { |
81 | match split_delim(entry, '=') { | 81 | match split_once(entry, '=') { |
82 | Some((k, v)) => cfg_key_values.push((k.to_string(), v.to_string())), | 82 | Some((k, v)) => cfg_key_values.push((k.to_string(), v.to_string())), |
83 | None => cfg_atoms.push(entry.to_string()), | 83 | None => cfg_atoms.push(entry.to_string()), |
84 | } | 84 | } |
@@ -86,7 +86,7 @@ impl Fixture { | |||
86 | } | 86 | } |
87 | "env" => { | 87 | "env" => { |
88 | for key in value.split(',') { | 88 | for key in value.split(',') { |
89 | if let Some((k, v)) = split_delim(key, '=') { | 89 | if let Some((k, v)) = split_once(key, '=') { |
90 | env.insert(k.into(), v.into()); | 90 | env.insert(k.into(), v.into()); |
91 | } | 91 | } |
92 | } | 92 | } |