 crates/ide/src/hover.rs                    |   1
 crates/ide/src/move_item.rs                | 268
 crates/profile/src/google_cpu_profiler.rs  |  19
 crates/profile/src/lib.rs                  |  37
 crates/rust-analyzer/src/benchmarks.rs     |   2
 docs/dev/README.md                         |  73
 6 files changed, 337 insertions(+), 63 deletions(-)
diff --git a/crates/ide/src/hover.rs b/crates/ide/src/hover.rs
index 3c951c507..02a1a5b37 100644
--- a/crates/ide/src/hover.rs
+++ b/crates/ide/src/hover.rs
@@ -470,6 +470,7 @@ fn find_std_module(famous_defs: &FamousDefs, name: &str) -> Option<hir::Module>
 
 fn pick_best(tokens: TokenAtOffset<SyntaxToken>) -> Option<SyntaxToken> {
     return tokens.max_by_key(priority);
+
     fn priority(n: &SyntaxToken) -> usize {
         match n.kind() {
             IDENT | INT_NUMBER | LIFETIME_IDENT | T![self] | T![super] | T![crate] => 3,
diff --git a/crates/ide/src/move_item.rs b/crates/ide/src/move_item.rs
index 05fa8fc13..d36dcd4e4 100644
--- a/crates/ide/src/move_item.rs
+++ b/crates/ide/src/move_item.rs
@@ -4,10 +4,12 @@ use hir::Semantics;
 use ide_db::{base_db::FileRange, RootDatabase};
 use itertools::Itertools;
 use syntax::{
-    algo, ast, match_ast, AstNode, NodeOrToken, SyntaxElement, SyntaxKind, SyntaxNode, TextRange,
+    algo, ast, match_ast, AstNode, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxToken, TextRange,
+    TokenAtOffset,
 };
 use text_edit::{TextEdit, TextEditBuilder};
 
+#[derive(Copy, Clone, Debug)]
 pub enum Direction {
     Up,
     Down,
@@ -31,14 +33,19 @@ pub(crate) fn move_item(
     let sema = Semantics::new(db);
     let file = sema.parse(range.file_id);
 
-    let item = file.syntax().covering_element(range.range);
+    let item = if range.range.is_empty() {
+        SyntaxElement::Token(pick_best(file.syntax().token_at_offset(range.range.start()))?)
+    } else {
+        file.syntax().covering_element(range.range)
+    };
+
     find_ancestors(item, direction, range.range)
 }
 
 fn find_ancestors(item: SyntaxElement, direction: Direction, range: TextRange) -> Option<TextEdit> {
     let root = match item {
-        NodeOrToken::Node(node) => node,
-        NodeOrToken::Token(token) => token.parent()?,
+        SyntaxElement::Node(node) => node,
+        SyntaxElement::Token(token) => token.parent()?,
     };
 
     let movable = [
@@ -51,6 +58,11 @@ fn find_ancestors(item: SyntaxElement, direction: Direction, range: TextRange) -
         SyntaxKind::PARAM,
         SyntaxKind::LET_STMT,
         SyntaxKind::EXPR_STMT,
+        SyntaxKind::IF_EXPR,
+        SyntaxKind::FOR_EXPR,
+        SyntaxKind::LOOP_EXPR,
+        SyntaxKind::WHILE_EXPR,
+        SyntaxKind::RETURN_EXPR,
         SyntaxKind::MATCH_EXPR,
         SyntaxKind::MACRO_CALL,
         SyntaxKind::TYPE_ALIAS,
@@ -83,11 +95,11 @@ fn move_in_direction(
 ) -> Option<TextEdit> {
     match_ast! {
         match node {
-            ast::ArgList(it) => swap_sibling_in_list(it.args(), range, direction),
-            ast::GenericParamList(it) => swap_sibling_in_list(it.generic_params(), range, direction),
-            ast::GenericArgList(it) => swap_sibling_in_list(it.generic_args(), range, direction),
-            ast::VariantList(it) => swap_sibling_in_list(it.variants(), range, direction),
-            ast::TypeBoundList(it) => swap_sibling_in_list(it.bounds(), range, direction),
+            ast::ArgList(it) => swap_sibling_in_list(node, it.args(), range, direction),
+            ast::GenericParamList(it) => swap_sibling_in_list(node, it.generic_params(), range, direction),
+            ast::GenericArgList(it) => swap_sibling_in_list(node, it.generic_args(), range, direction),
+            ast::VariantList(it) => swap_sibling_in_list(node, it.variants(), range, direction),
+            ast::TypeBoundList(it) => swap_sibling_in_list(node, it.bounds(), range, direction),
             _ => Some(replace_nodes(node, &match direction {
                 Direction::Up => node.prev_sibling(),
                 Direction::Down => node.next_sibling(),
@@ -97,19 +109,27 @@ fn move_in_direction(
 }
 
 fn swap_sibling_in_list<A: AstNode + Clone, I: Iterator<Item = A>>(
+    node: &SyntaxNode,
     list: I,
     range: TextRange,
     direction: Direction,
 ) -> Option<TextEdit> {
-    let (l, r) = list
+    let list_lookup = list
         .tuple_windows()
         .filter(|(l, r)| match direction {
             Direction::Up => r.syntax().text_range().contains_range(range),
             Direction::Down => l.syntax().text_range().contains_range(range),
         })
-        .next()?;
+        .next();
 
-    Some(replace_nodes(l.syntax(), r.syntax()))
+    if let Some((l, r)) = list_lookup {
+        Some(replace_nodes(l.syntax(), r.syntax()))
+    } else {
+        // The cursor is beyond any movable list item (for example, on a curly brace in an enum).
+        // The list's parent is not necessarily movable itself (an arg list's parent is not, for
+        // example), so we continue the tree traversal to find a suitable node.
+        find_ancestors(SyntaxElement::Node(node.parent()?), direction, range)
+    }
 }
 
 fn replace_nodes(first: &SyntaxNode, second: &SyntaxNode) -> TextEdit {
@@ -121,6 +141,18 @@ fn replace_nodes(first: &SyntaxNode, second: &SyntaxNode) -> TextEdit {
     edit.finish()
 }
 
+fn pick_best(tokens: TokenAtOffset<SyntaxToken>) -> Option<SyntaxToken> {
+    return tokens.max_by_key(priority);
+
+    fn priority(n: &SyntaxToken) -> usize {
+        match n.kind() {
+            SyntaxKind::IDENT | SyntaxKind::LIFETIME_IDENT => 2,
+            kind if kind.is_trivia() => 0,
+            _ => 1,
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::fixture;
@@ -265,6 +297,107 @@ fn main() {
 "#]],
             Direction::Up,
         );
+        check(
+            r#"
+fn main() {
+    println!("Hello, world");
+
+    if true {
+        println!("Test");
+    }$0$0
+}
+"#,
+            expect![[r#"
+fn main() {
+    if true {
+        println!("Test");
+    }
+
+    println!("Hello, world");
+}
+"#]],
+            Direction::Up,
+        );
+        check(
+            r#"
+fn main() {
+    println!("Hello, world");
+
+    for i in 0..10 {
+        println!("Test");
+    }$0$0
+}
+"#,
+            expect![[r#"
+fn main() {
+    for i in 0..10 {
+        println!("Test");
+    }
+
+    println!("Hello, world");
+}
+"#]],
+            Direction::Up,
+        );
+        check(
+            r#"
+fn main() {
+    println!("Hello, world");
+
+    loop {
+        println!("Test");
+    }$0$0
+}
+"#,
+            expect![[r#"
+fn main() {
+    loop {
+        println!("Test");
+    }
+
+    println!("Hello, world");
+}
+"#]],
+            Direction::Up,
+        );
+        check(
+            r#"
+fn main() {
+    println!("Hello, world");
+
+    while true {
+        println!("Test");
+    }$0$0
+}
+"#,
+            expect![[r#"
+fn main() {
+    while true {
+        println!("Test");
+    }
+
+    println!("Hello, world");
+}
+"#]],
+            Direction::Up,
+        );
+        check(
+            r#"
+fn main() {
+    println!("Hello, world");
+
+    return 123;$0$0
+}
+"#,
+            expect![[r#"
+fn main() {
+    return 123;
+
+    println!("Hello, world");
+}
+"#]],
+            Direction::Up,
+        );
     }
 
     #[test]
@@ -615,6 +748,115 @@ fn test() {
     }
 
     #[test]
+    fn test_cursor_at_item_start() {
+        check(
+            r#"
+$0$0#[derive(Debug)]
+enum FooBar {
+    Foo,
+    Bar,
+}
+
+fn main() {}
+"#,
+            expect![[r#"
+fn main() {}
+
+#[derive(Debug)]
+enum FooBar {
+    Foo,
+    Bar,
+}
+"#]],
+            Direction::Down,
+        );
+        check(
+            r#"
+$0$0enum FooBar {
+    Foo,
+    Bar,
+}
+
+fn main() {}
+"#,
+            expect![[r#"
+fn main() {}
+
+enum FooBar {
+    Foo,
+    Bar,
+}
+"#]],
+            Direction::Down,
+        );
+        check(
+            r#"
+struct Test;
+
+trait SomeTrait {}
+
+$0$0impl SomeTrait for Test {}
+
+fn main() {}
+"#,
+            expect![[r#"
+struct Test;
+
+impl SomeTrait for Test {}
+
+trait SomeTrait {}
+
+fn main() {}
+"#]],
+            Direction::Up,
+        );
+    }
+
+    #[test]
+    fn test_cursor_at_item_end() {
+        check(
+            r#"
+enum FooBar {
+    Foo,
+    Bar,
+}$0$0
+
+fn main() {}
+"#,
+            expect![[r#"
+fn main() {}
+
+enum FooBar {
+    Foo,
+    Bar,
+}
+"#]],
+            Direction::Down,
+        );
+        check(
+            r#"
+struct Test;
+
+trait SomeTrait {}
+
+impl SomeTrait for Test {}$0$0
+
+fn main() {}
+"#,
+            expect![[r#"
+struct Test;
+
+impl SomeTrait for Test {}
+
+trait SomeTrait {}
+
+fn main() {}
+"#]],
+            Direction::Up,
+        );
+    }
+
+    #[test]
     fn handles_empty_file() {
         check(r#"$0$0"#, expect![[r#""#]], Direction::Up);
     }
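
Note: the move_item.rs change above makes a bare cursor (an empty selection, `$0$0` in the fixtures) usable. `token_at_offset` can return up to two adjacent tokens at that offset, and `pick_best` breaks the tie by preferring identifiers over punctuation and punctuation over trivia. A minimal, self-contained sketch of that tie-breaking, using a plain enum instead of the `syntax` crate's `TokenAtOffset`/`SyntaxToken` types (the names below are illustrative only, not the rust-analyzer API):

```rust
/// Illustrative stand-in for syntax token kinds; not the rust-analyzer API.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TokenKind {
    Ident,
    Punct,
    Whitespace,
}

/// A bare cursor offset touches at most two adjacent tokens (the one ending
/// there and the one starting there). Prefer identifiers over punctuation and
/// punctuation over trivia, mirroring `priority` in the diff above.
fn pick_best(candidates: &[TokenKind]) -> Option<TokenKind> {
    fn priority(kind: &TokenKind) -> usize {
        match kind {
            TokenKind::Ident => 2,
            TokenKind::Punct => 1,
            TokenKind::Whitespace => 0,
        }
    }
    candidates.iter().copied().max_by_key(priority)
}

fn main() {
    // Cursor sits between `}` and a newline: the brace wins over the
    // whitespace token, so the enclosing block is still found and moved.
    let at_cursor = [TokenKind::Punct, TokenKind::Whitespace];
    assert_eq!(pick_best(&at_cursor), Some(TokenKind::Punct));
}
```

In the real code, the winning token's parent node is then handed to `find_ancestors`, which is why the `}$0$0` fixtures above resolve to the whole `if`/`for`/`loop`/`while` expression rather than to the brace itself.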
diff --git a/crates/profile/src/google_cpu_profiler.rs b/crates/profile/src/google_cpu_profiler.rs
index db865c65b..cae6caeaa 100644
--- a/crates/profile/src/google_cpu_profiler.rs
+++ b/crates/profile/src/google_cpu_profiler.rs
@@ -14,26 +14,31 @@ extern "C" {
     fn ProfilerStop();
 }
 
-static PROFILER_STATE: AtomicUsize = AtomicUsize::new(OFF);
 const OFF: usize = 0;
 const ON: usize = 1;
 const PENDING: usize = 2;
 
-pub fn start(path: &Path) {
-    if PROFILER_STATE.compare_and_swap(OFF, PENDING, Ordering::SeqCst) != OFF {
+fn transition(current: usize, new: usize) -> bool {
+    static STATE: AtomicUsize = AtomicUsize::new(OFF);
+
+    STATE.compare_exchange(current, new, Ordering::SeqCst, Ordering::SeqCst).is_ok()
+}
+
+pub(crate) fn start(path: &Path) {
+    if !transition(OFF, PENDING) {
         panic!("profiler already started");
     }
     let path = CString::new(path.display().to_string()).unwrap();
     if unsafe { ProfilerStart(path.as_ptr()) } == 0 {
         panic!("profiler failed to start")
     }
-    assert!(PROFILER_STATE.compare_and_swap(PENDING, ON, Ordering::SeqCst) == PENDING);
+    assert!(transition(PENDING, ON));
 }
 
-pub fn stop() {
-    if PROFILER_STATE.compare_and_swap(ON, PENDING, Ordering::SeqCst) != ON {
+pub(crate) fn stop() {
+    if !transition(ON, PENDING) {
         panic!("profiler is not started")
     }
     unsafe { ProfilerStop() };
-    assert!(PROFILER_STATE.compare_and_swap(PENDING, OFF, Ordering::SeqCst) == PENDING);
+    assert!(transition(PENDING, OFF));
 }
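
Note: the profiler wrapper above swaps `compare_and_swap` (deprecated in favour of `compare_exchange`, which returns a `Result`) for a single `transition` helper, so every step of the OFF → PENDING → ON state machine is one atomic, checkable operation. A standalone sketch of the same pattern using only `std` atomics, with the gperftools FFI calls elided:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const OFF: usize = 0;
const ON: usize = 1;
const PENDING: usize = 2;

// One process-wide state cell, as in the patch above (there it is a
// function-local `static` inside `transition`).
static STATE: AtomicUsize = AtomicUsize::new(OFF);

/// Atomically move the state machine from `current` to `new`.
/// Returns `false` if the state was not `current`, i.e. the transition is
/// invalid or another thread won the race.
fn transition(current: usize, new: usize) -> bool {
    STATE.compare_exchange(current, new, Ordering::SeqCst, Ordering::SeqCst).is_ok()
}

fn start() {
    // OFF -> PENDING reserves the profiler; PENDING -> ON is only reached
    // once the (elided) ProfilerStart call has succeeded.
    if !transition(OFF, PENDING) {
        panic!("profiler already started");
    }
    // unsafe { ProfilerStart(..) } would go here.
    assert!(transition(PENDING, ON));
}

fn stop() {
    if !transition(ON, PENDING) {
        panic!("profiler is not started");
    }
    // unsafe { ProfilerStop() } would go here.
    assert!(transition(PENDING, OFF));
}

fn main() {
    start();
    stop();
    // Calling stop() again here would panic: the state is back to OFF.
}
```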
diff --git a/crates/profile/src/lib.rs b/crates/profile/src/lib.rs
index 9ca6341db..a31fb8f43 100644
--- a/crates/profile/src/lib.rs
+++ b/crates/profile/src/lib.rs
@@ -52,7 +52,7 @@ impl Drop for Scope {
 /// Usage:
 /// 1. Install gpref_tools (https://github.com/gperftools/gperftools), probably packaged with your Linux distro.
 /// 2. Build with `cpu_profiler` feature.
-/// 3. Tun the code, the *raw* output would be in the `./out.profile` file.
+/// 3. Run the code, the *raw* output would be in the `./out.profile` file.
 /// 4. Install pprof for visualization (https://github.com/google/pprof).
 /// 5. Bump sampling frequency to once per ms: `export CPUPROFILE_FREQUENCY=1000`
 /// 6. Use something like `pprof -svg target/release/rust-analyzer ./out.profile` to see the results.
@@ -60,8 +60,17 @@ impl Drop for Scope {
 /// For example, here's how I run profiling on NixOS:
 ///
 /// ```bash
-/// $ nix-shell -p gperftools --run \
-///     'cargo run --release -p rust-analyzer -- parse < ~/projects/rustbench/parser.rs > /dev/null'
+/// $ bat -p shell.nix
+/// with import <nixpkgs> {};
+/// mkShell {
+///   buildInputs = [ gperftools ];
+///   shellHook = ''
+///     export LD_LIBRARY_PATH="${gperftools}/lib:"
+///   '';
+/// }
+/// $ set -x CPUPROFILE_FREQUENCY 1000
+/// $ nix-shell --run 'cargo test --release --package rust-analyzer --lib -- benchmarks::benchmark_integrated_highlighting --exact --nocapture'
+/// $ pprof -svg target/release/deps/rust_analyzer-8739592dc93d63cb crates/rust-analyzer/out.profile > profile.svg
 /// ```
 ///
 /// See this diff for how to profile completions:
@@ -81,7 +90,9 @@ pub fn cpu_span() -> CpuSpan {
 
     #[cfg(not(feature = "cpu_profiler"))]
     {
-        eprintln!("cpu_profiler feature is disabled")
+        eprintln!(
+            r#"cpu profiling is disabled, uncomment `default = [ "cpu_profiler" ]` in Cargo.toml to enable."#
+        )
     }
 
     CpuSpan { _private: () }
@@ -91,7 +102,23 @@ impl Drop for CpuSpan {
     fn drop(&mut self) {
         #[cfg(feature = "cpu_profiler")]
         {
-            google_cpu_profiler::stop()
+            google_cpu_profiler::stop();
+            let profile_data = std::env::current_dir().unwrap().join("out.profile");
+            eprintln!("Profile data saved to:\n\n    {}\n", profile_data.display());
+            let mut cmd = std::process::Command::new("pprof");
+            cmd.arg("-svg").arg(std::env::current_exe().unwrap()).arg(&profile_data);
+            let out = cmd.output();
+
+            match out {
+                Ok(out) if out.status.success() => {
+                    let svg = profile_data.with_extension("svg");
+                    std::fs::write(&svg, &out.stdout).unwrap();
+                    eprintln!("Profile rendered to:\n\n    {}\n", svg.display());
+                }
+                _ => {
+                    eprintln!("Failed to run:\n\n    {:?}\n", cmd);
+                }
+            }
         }
     }
 }
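
Note: the expanded `Drop for CpuSpan` turns `cpu_span()` into a classic RAII guard: profiling starts when the guard is created, and the raw profile is stopped and rendered (via `pprof -svg`) when the guard goes out of scope. The usual trap with such guards is binding them to `_`, which drops them immediately. A small `std`-only sketch of the pattern (the `Span` type, `span()` helper, and labels below are illustrative, not the `profile` crate's API):

```rust
use std::thread::sleep;
use std::time::{Duration, Instant};

/// Guard that measures the scope it lives in. The real `CpuSpan` instead
/// starts the gperftools profiler on creation and stops/renders it on drop.
struct Span {
    label: &'static str,
    started: Instant,
}

fn span(label: &'static str) -> Span {
    Span { label, started: Instant::now() }
}

impl Drop for Span {
    fn drop(&mut self) {
        eprintln!("{}: {:.2?}", self.label, self.started.elapsed());
    }
}

fn main() {
    {
        // Bound to a named `_span`, the guard lives until the end of the
        // block, so the whole block is covered.
        let _span = span("after change");
        sleep(Duration::from_millis(10));
    } // <- the report is printed here

    // Pitfall: `let _ = span(..)` drops the guard right away and measures
    // (or profiles) nothing.
    let _ = span("dropped immediately");
    sleep(Duration::from_millis(10));
}
```

The benchmarks.rs hunk below scopes exactly such a guard (`let _span = profile::cpu_span();`) to the one block it wants profiled.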
diff --git a/crates/rust-analyzer/src/benchmarks.rs b/crates/rust-analyzer/src/benchmarks.rs
index a6f997af8..bf569b40b 100644
--- a/crates/rust-analyzer/src/benchmarks.rs
+++ b/crates/rust-analyzer/src/benchmarks.rs
@@ -51,6 +51,7 @@ fn benchmark_integrated_highlighting() {
     }
 
     profile::init_from("*>100");
+    // let _s = profile::heartbeat_span();
 
     {
         let _it = stdx::timeit("change");
@@ -63,6 +64,7 @@ fn benchmark_integrated_highlighting() {
 
     {
         let _it = stdx::timeit("after change");
+        let _span = profile::cpu_span();
         let analysis = host.analysis();
         analysis.highlight_as_html(file_id, false).unwrap();
     }
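
Note: `profile::init_from("*>100")` in the context above configures the built-in hierarchical profiler from a filter string; as used here, `*` matches any span and `>100` is a duration cut-off in milliseconds below which spans are not reported (the same kind of string can be supplied via the `RA_PROFILE` env-var mentioned in the README below). A rough, `std`-only sketch of what such a spec encodes — the `Filter` type and `parse_filter` below are illustrative, and the real `RA_PROFILE` syntax is richer than this:

```rust
use std::time::Duration;

/// What a filter such as `*>100` boils down to: which spans to report and
/// the minimum duration a span must reach to be printed.
#[derive(Debug, PartialEq)]
struct Filter {
    name: String, // "*" means "any span"
    longer_than: Duration,
}

/// Parse the simple `name` or `name>millis` shape (a sketch only).
fn parse_filter(spec: &str) -> Option<Filter> {
    let (name, millis) = match spec.split_once('>') {
        Some((name, millis)) => (name, millis.parse().ok()?),
        None => (spec, 0),
    };
    Some(Filter { name: name.to_string(), longer_than: Duration::from_millis(millis) })
}

fn main() {
    let filter = parse_filter("*>100").unwrap();
    assert_eq!(filter.name, "*");
    assert_eq!(filter.longer_than, Duration::from_millis(100));
}
```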
diff --git a/docs/dev/README.md b/docs/dev/README.md
index dcbab1a1d..eab21a765 100644
--- a/docs/dev/README.md
+++ b/docs/dev/README.md
@@ -1,7 +1,7 @@
 # Contributing Quick Start
 
-Rust Analyzer is an ordinary Rust project, which is organized as a Cargo
-workspace, builds on stable and doesn't depend on C libraries. So, just
+Rust Analyzer is an ordinary Rust project, which is organized as a Cargo workspace, builds on stable and doesn't depend on C libraries.
+So, just
 
 ```
 $ cargo test
@@ -13,9 +13,8 @@ To learn more about how rust-analyzer works, see [./architecture.md](./architect
 It also explains the high-level layout of the source code.
 Do skim through that document.
 
-We also publish rustdoc docs to pages:
-
-https://rust-analyzer.github.io/rust-analyzer/ide/
+We also publish rustdoc docs to pages: https://rust-analyzer.github.io/rust-analyzer/ide/.
+Note though, that internal documentation is very incomplete.
 
 Various organizational and process issues are discussed in this document.
 
@@ -49,21 +48,28 @@ https://rust-lang.zulipchat.com/#narrow/stream/185405-t-compiler.2Fwg-rls-2.2E0
 Also a kind of fun.
 These issues should generally include a link to a Zulip discussion thread.
 
-# CI
+# Code Style & Review Process
+
+Do see [./style.md](./style.md).
 
-We use GitHub Actions for CI. Most of the things, including formatting, are checked by
-`cargo test` so, if `cargo test` passes locally, that's a good sign that CI will
-be green as well. The only exception is that some long-running tests are skipped locally by default.
+# Cookbook
+
+## CI
+
+We use GitHub Actions for CI.
+Most of the things, including formatting, are checked by `cargo test`.
+If `cargo test` passes locally, that's a good sign that CI will be green as well.
+The only exception is that some long-running tests are skipped locally by default.
 Use `env RUN_SLOW_TESTS=1 cargo test` to run the full suite.
 
 We use bors-ng to enforce the [not rocket science](https://graydon2.dreamwidth.org/1597.html) rule.
 
-# Launching rust-analyzer
+## Launching rust-analyzer
 
 Debugging the language server can be tricky.
 LSP is rather chatty, so driving it from the command line is not really feasible, driving it via VS Code requires interacting with two processes.
 
-For this reason, the best way to see how rust-analyzer works is to find a relevant test and execute it.
+For this reason, the best way to see how rust-analyzer works is to **find a relevant test and execute it**.
 VS Code & Emacs include an action for running a single test.
 
 Launching a VS Code instance with a locally built language server is also possible.
@@ -107,12 +113,7 @@ cd editors/code
 npm ci
 npm run lint
 ```
-
-# Code Style & Review Process
-
-Do see [./style.md](./style.md).
-
-# How to ...
+## How to ...
 
 * ... add an assist? [#7535](https://github.com/rust-analyzer/rust-analyzer/pull/7535)
 * ... add a new protocol extension? [#4569](https://github.com/rust-analyzer/rust-analyzer/pull/4569)
@@ -120,18 +121,17 @@ Do see [./style.md](./style.md).
 * ... add a new completion? [#6964](https://github.com/rust-analyzer/rust-analyzer/pull/6964)
 * ... allow new syntax in the parser? [#7338](https://github.com/rust-analyzer/rust-analyzer/pull/7338)
 
-# Logging
+## Logging
 
-Logging is done by both rust-analyzer and VS Code, so it might be tricky to
-figure out where logs go.
+Logging is done by both rust-analyzer and VS Code, so it might be tricky to figure out where logs go.
 
-Inside rust-analyzer, we use the standard `log` crate for logging, and
-`env_logger` for logging frontend. By default, log goes to stderr, but the
-stderr itself is processed by VS Code.
+Inside rust-analyzer, we use the standard `log` crate for logging, and `env_logger` for logging frontend.
+By default, log goes to stderr, but the stderr itself is processed by VS Code.
+`--log-file <PATH>` CLI argument allows logging to file.
 
-To see stderr in the running VS Code instance, go to the "Output" tab of the
-panel and select `rust-analyzer`. This shows `eprintln!` as well. Note that
-`stdout` is used for the actual protocol, so `println!` will break things.
+To see stderr in the running VS Code instance, go to the "Output" tab of the panel and select `rust-analyzer`.
+This shows `eprintln!` as well.
+Note that `stdout` is used for the actual protocol, so `println!` will break things.
 
 To log all communication between the server and the client, there are two choices:
 
@@ -139,17 +139,12 @@ To log all communication between the server and the client, there are two choice
 ```
 env RA_LOG=lsp_server=debug code .
 ```
+* You can log on the client side, by enabling `"rust-analyzer.trace.server": "verbose"` workspace setting.
+  These logs are shown in a separate tab in the output and could be used with LSP inspector.
+  Kudos to [@DJMcNab](https://github.com/DJMcNab) for setting this awesome infra up!
 
-By default, logs go to stderr, `--log-file <PATH>` CLI argument overrides
-that.
 
-* You can log on the client side, by enabling `"rust-analyzer.trace.server":
-  "verbose"` workspace setting. These logs are shown in a separate tab in the
-  output and could be used with LSP inspector. Kudos to
-  [@DJMcNab](https://github.com/DJMcNab) for setting this awesome infra up!
-
-
-There are also two VS Code commands which might be of interest:
+There are also several VS Code commands which might be of interest:
 
 * `Rust Analyzer: Status` shows some memory-usage statistics.
 
@@ -167,7 +162,7 @@ There are also two VS Code commands which might be of interest:
 
 ![demo](https://user-images.githubusercontent.com/36276403/78225773-6636a480-74d3-11ea-9d9f-1c9d42da03b0.png)
 
-# Profiling
+## Profiling
 
 We have a built-in hierarchical profiler, you can enable it by using `RA_PROFILE` env-var:
 
@@ -195,7 +190,9 @@ $ cargo run --release -p rust-analyzer -- analysis-bench ../chalk/ --highlight .
 $ cargo run --release -p rust-analyzer -- analysis-bench ../chalk/ --complete ../chalk/chalk-engine/src/logic.rs:94:0
 ```
 
-# Release Process
+Look for `fn benchmark_xxx` tests for a quick way to reproduce performance problems.
+
+## Release Process
 
 Release process is handled by `release`, `dist` and `promote` xtasks, `release` being the main one.
 
@@ -232,7 +229,7 @@ Make sure to remove the new changelog post created when running `cargo xtask rel
 We release "nightly" every night automatically and promote the latest nightly to "stable" manually, every week.
 We don't do "patch" releases, unless something truly egregious comes up.
 
-# Permissions
+## Permissions
 
 There are three sets of people with extra permissions:
 
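
Note: the new "Look for `fn benchmark_xxx` tests" advice in the README refers to plain `#[test]` functions (such as `benchmark_integrated_highlighting` above) that time a realistic workload and print the result, so they can be run with `--nocapture` and combined with `RA_PROFILE` or the `cpu_profiler` feature. A minimal, hypothetical benchmark-as-test in that spirit; the function names and the workload below are made up for illustration, and a real benchmark would exercise the actual analysis (e.g. `host.analysis()` as in benchmarks.rs above):

```rust
use std::time::Instant;

// Stand-in workload for illustration only.
fn expensive_operation() -> usize {
    (0..1_000_000u64).filter(|n| n % 3 == 0).count()
}

#[test]
fn benchmark_expensive_operation() {
    let t = Instant::now();
    let n = expensive_operation();
    // Run with:
    //   cargo test --release -- benchmark_expensive_operation --nocapture
    // so the timing below is actually printed.
    eprintln!("expensive_operation: {} items in {:.2?}", n, t.elapsed());
}
```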