-rw-r--r--   crates/hir/src/semantics.rs                       |   9
-rw-r--r--   crates/ide/src/completion/completion_context.rs   |   2
-rw-r--r--   crates/ssr/src/resolving.rs                       |   4
-rw-r--r--   crates/vfs/src/vfs_path.rs                        | 139
-rw-r--r--   docs/dev/README.md                                |   5
-rw-r--r--   docs/dev/style.md                                 |  18
-rw-r--r--   xtask/Cargo.toml                                  |   1
7 files changed, 154 insertions, 24 deletions
diff --git a/crates/hir/src/semantics.rs b/crates/hir/src/semantics.rs
index d8beac98a..3953017c3 100644
--- a/crates/hir/src/semantics.rs
+++ b/crates/hir/src/semantics.rs
@@ -112,14 +112,13 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> {
     pub fn expand(&self, macro_call: &ast::MacroCall) -> Option<SyntaxNode> {
         self.imp.expand(macro_call)
     }
-
-    pub fn expand_hypothetical(
+    pub fn speculative_expand(
         &self,
         actual_macro_call: &ast::MacroCall,
         hypothetical_args: &ast::TokenTree,
         token_to_map: SyntaxToken,
     ) -> Option<(SyntaxNode, SyntaxToken)> {
-        self.imp.expand_hypothetical(actual_macro_call, hypothetical_args, token_to_map)
+        self.imp.speculative_expand(actual_macro_call, hypothetical_args, token_to_map)
     }
 
     pub fn descend_into_macros(&self, token: SyntaxToken) -> SyntaxToken {
@@ -311,7 +310,7 @@ impl<'db> SemanticsImpl<'db> {
         Some(node)
     }
 
-    fn expand_hypothetical(
+    fn speculative_expand(
         &self,
         actual_macro_call: &ast::MacroCall,
         hypothetical_args: &ast::TokenTree,
@@ -756,7 +755,7 @@ impl<'a> SemanticsScope<'a> {
 
     /// Resolve a path as-if it was written at the given scope. This is
     /// necessary a heuristic, as it doesn't take hygiene into account.
-    pub fn resolve_hypothetical(&self, path: &ast::Path) -> Option<PathResolution> {
+    pub fn speculative_resolve(&self, path: &ast::Path) -> Option<PathResolution> {
         let hygiene = Hygiene::new(self.db.upcast(), self.file_id);
         let path = Path::from_src(path.clone(), &hygiene)?;
         self.resolve_hir_path(&path)
diff --git a/crates/ide/src/completion/completion_context.rs b/crates/ide/src/completion/completion_context.rs
index 3857dce67..85456a66f 100644
--- a/crates/ide/src/completion/completion_context.rs
+++ b/crates/ide/src/completion/completion_context.rs
@@ -185,7 +185,7 @@ impl<'a> CompletionContext<'a> {
             };
             if let (Some(actual_expansion), Some(hypothetical_expansion)) = (
                 ctx.sema.expand(&actual_macro_call),
-                ctx.sema.expand_hypothetical(
+                ctx.sema.speculative_expand(
                     &actual_macro_call,
                     &hypothetical_args,
                     fake_ident_token,
diff --git a/crates/ssr/src/resolving.rs b/crates/ssr/src/resolving.rs
index 4441fb426..b932132d5 100644
--- a/crates/ssr/src/resolving.rs
+++ b/crates/ssr/src/resolving.rs
@@ -212,13 +212,13 @@ impl<'db> ResolutionScope<'db> {
         // First try resolving the whole path. This will work for things like
         // `std::collections::HashMap`, but will fail for things like
         // `std::collections::HashMap::new`.
-        if let Some(resolution) = self.scope.resolve_hypothetical(&path) {
+        if let Some(resolution) = self.scope.speculative_resolve(&path) {
             return Some(resolution);
         }
         // Resolution failed, try resolving the qualifier (e.g. `std::collections::HashMap` and if
         // that succeeds, then iterate through the candidates on the resolved type with the provided
         // name.
-        let resolved_qualifier = self.scope.resolve_hypothetical(&path.qualifier()?)?;
+        let resolved_qualifier = self.scope.speculative_resolve(&path.qualifier()?)?;
         if let hir::PathResolution::Def(hir::ModuleDef::Adt(adt)) = resolved_qualifier {
             let name = path.segment()?.name_ref()?;
             adt.ty(self.scope.db).iterate_path_candidates(
diff --git a/crates/vfs/src/vfs_path.rs b/crates/vfs/src/vfs_path.rs
index 04a42264e..944a702df 100644
--- a/crates/vfs/src/vfs_path.rs
+++ b/crates/vfs/src/vfs_path.rs
@@ -57,23 +57,42 @@ impl VfsPath {
         };
         buf.push(tag);
         match &self.0 {
-            VfsPathRepr::PathBuf(it) => {
-                let path: &std::ffi::OsStr = it.as_os_str();
+            VfsPathRepr::PathBuf(path) => {
                 #[cfg(windows)]
                 {
-                    use std::os::windows::ffi::OsStrExt;
-                    for wchar in path.encode_wide() {
-                        buf.extend(wchar.to_le_bytes().iter().copied());
+                    use windows_paths::Encode;
+                    let components = path.components();
+                    let mut add_sep = false;
+                    for component in components {
+                        if add_sep {
+                            windows_paths::SEP.encode(buf);
+                        }
+                        let len_before = buf.len();
+                        match component {
+                            std::path::Component::Prefix(prefix) => {
+                                // kind() returns a normalized and comparable path prefix.
+                                prefix.kind().encode(buf);
+                            }
+                            std::path::Component::RootDir => {
+                                if !add_sep {
+                                    component.as_os_str().encode(buf);
+                                }
+                            }
+                            _ => component.as_os_str().encode(buf),
+                        }
+
+                        // some components may be encoded empty
+                        add_sep = len_before != buf.len();
                     }
                 }
                 #[cfg(unix)]
                 {
                     use std::os::unix::ffi::OsStrExt;
-                    buf.extend(path.as_bytes());
+                    buf.extend(path.as_os_str().as_bytes());
                 }
                 #[cfg(not(any(windows, unix)))]
                 {
-                    buf.extend(path.to_string_lossy().as_bytes());
+                    buf.extend(path.as_os_str().to_string_lossy().as_bytes());
                 }
             }
             VfsPathRepr::VirtualPath(VirtualPath(s)) => buf.extend(s.as_bytes()),
@@ -81,6 +100,112 @@ impl VfsPath {
     }
 }
 
+#[cfg(windows)]
+mod windows_paths {
+    pub trait Encode {
+        fn encode(&self, buf: &mut Vec<u8>);
+    }
+
+    impl Encode for std::ffi::OsStr {
+        fn encode(&self, buf: &mut Vec<u8>) {
+            use std::os::windows::ffi::OsStrExt;
+            for wchar in self.encode_wide() {
+                buf.extend(wchar.to_le_bytes().iter().copied());
+            }
+        }
+    }
+
+    impl Encode for u8 {
+        fn encode(&self, buf: &mut Vec<u8>) {
+            let wide = *self as u16;
+            buf.extend(wide.to_le_bytes().iter().copied())
+        }
+    }
+
+    impl Encode for &str {
+        fn encode(&self, buf: &mut Vec<u8>) {
+            debug_assert!(self.is_ascii());
+            for b in self.as_bytes() {
+                b.encode(buf)
+            }
+        }
+    }
+
+    pub const SEP: &str = "\\";
+    const VERBATIM: &str = "\\\\?\\";
+    const UNC: &str = "UNC";
+    const DEVICE: &str = "\\\\.\\";
+    const COLON: &str = ":";
+
+    impl Encode for std::path::Prefix<'_> {
+        fn encode(&self, buf: &mut Vec<u8>) {
+            match self {
+                std::path::Prefix::Verbatim(c) => {
+                    VERBATIM.encode(buf);
+                    c.encode(buf);
+                }
+                std::path::Prefix::VerbatimUNC(server, share) => {
+                    VERBATIM.encode(buf);
+                    UNC.encode(buf);
+                    SEP.encode(buf);
+                    server.encode(buf);
+                    SEP.encode(buf);
+                    share.encode(buf);
+                }
+                std::path::Prefix::VerbatimDisk(d) => {
+                    VERBATIM.encode(buf);
+                    d.encode(buf);
+                    COLON.encode(buf);
+                }
+                std::path::Prefix::DeviceNS(device) => {
+                    DEVICE.encode(buf);
+                    device.encode(buf);
+                }
+                std::path::Prefix::UNC(server, share) => {
+                    SEP.encode(buf);
+                    SEP.encode(buf);
+                    server.encode(buf);
+                    SEP.encode(buf);
+                    share.encode(buf);
+                }
+                std::path::Prefix::Disk(d) => {
+                    d.encode(buf);
+                    COLON.encode(buf);
+                }
+            }
+        }
+    }
+    #[test]
+    fn paths_encoding() {
+        // drive letter casing agnostic
+        test_eq("C:/x.rs", "c:/x.rs");
+        // separator agnostic
+        test_eq("C:/x/y.rs", "C:\\x\\y.rs");
+
+        fn test_eq(a: &str, b: &str) {
+            let mut b1 = Vec::new();
+            let mut b2 = Vec::new();
+            vfs(a).encode(&mut b1);
+            vfs(b).encode(&mut b2);
+            assert_eq!(b1, b2);
+        }
+    }
+
+    #[test]
+    fn test_sep_root_dir_encoding() {
+        let mut buf = Vec::new();
+        vfs("C:/x/y").encode(&mut buf);
+        assert_eq!(&buf, &[0, 67, 0, 58, 0, 92, 0, 120, 0, 92, 0, 121, 0])
+    }
+
+    #[cfg(test)]
+    fn vfs(str: &str) -> super::VfsPath {
+        use super::{AbsPathBuf, VfsPath};
+        use std::convert::TryFrom;
+        VfsPath::from(AbsPathBuf::try_from(str).unwrap())
+    }
+}
+
 #[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
 enum VfsPathRepr {
     PathBuf(AbsPathBuf),
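Note on the vfs_path.rs change above: the new Windows branch encodes `prefix.kind()` rather than the prefix's raw text because `std::path` parses the prefix into a normalized, comparable value; the commit's `paths_encoding` test relies on the drive letter of a `Disk`/`VerbatimDisk` prefix coming back case-normalized. Below is a minimal standalone sketch of that std behaviour. It is illustrative only, not code from this commit, and it is meaningful only on Windows, where path prefixes are actually parsed.

    // Standalone illustration: why encoding the parsed prefix makes
    // `c:\x.rs` and `C:\x.rs` produce identical bytes on Windows.
    use std::path::{Component, Path, Prefix};

    fn drive_letter(p: &Path) -> Option<u8> {
        match p.components().next()? {
            // `PrefixComponent::kind()` yields the parsed prefix; for disk
            // prefixes the drive letter comes back normalized (upper-cased).
            Component::Prefix(prefix) => match prefix.kind() {
                Prefix::Disk(d) | Prefix::VerbatimDisk(d) => Some(d),
                _ => None,
            },
            _ => None,
        }
    }

    fn main() {
        // Both spellings parse to the same Disk prefix, so the VFS encoding
        // of the two paths comes out byte-for-byte equal.
        assert_eq!(drive_letter(Path::new(r"c:\x.rs")), drive_letter(Path::new(r"C:\x.rs")));
    }

With the prefix encoded through `kind()` and separators emitted uniformly as `\`, the `C:/x.rs` vs `c:/x.rs` and `/` vs `\` spellings end up as identical byte sequences, which is what the new tests in the diff assert.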
diff --git a/docs/dev/README.md b/docs/dev/README.md
index ad18217f1..36edddc70 100644
--- a/docs/dev/README.md
+++ b/docs/dev/README.md
@@ -165,6 +165,11 @@ In general, API is centered around UI concerns -- the result of the call is what
 The results are 100% Rust specific though.
 Shout outs to LSP developers for popularizing the idea that "UI" is a good place to draw a boundary at.
 
+## CI
+
+CI does not test rust-analyzer, CI is a core part of rust-analyzer, and is maintained with above average standard of quality.
+CI is reproducible -- it can only be broken by changes to files in this repository, any dependence on externalities is a bug.
+
 # Code Style & Review Process
 
 Do see [./style.md](./style.md).
diff --git a/docs/dev/style.md b/docs/dev/style.md
index 3bbab6da9..963a6d73d 100644
--- a/docs/dev/style.md
+++ b/docs/dev/style.md
@@ -65,7 +65,7 @@ There are many benefits to this:
 It also makes sense to format snippets more compactly (for example, by placing enum definitions like `enum E { Foo, Bar }` on a single line),
 as long as they are still readable.
 
-## Order of Imports
+# Order of Imports
 
 Separate import groups with blank lines.
 Use one `use` per crate.
@@ -91,7 +91,7 @@ use super::{}
 Module declarations come before the imports.
 Order them in "suggested reading order" for a person new to the code base.
 
-## Import Style
+# Import Style
 
 Qualify items from `hir` and `ast`.
 
@@ -112,7 +112,7 @@ Avoid local `use MyEnum::*` imports.
 
 Prefer `use crate::foo::bar` to `use super::bar`.
 
-## Order of Items
+# Order of Items
 
 Optimize for the reader who sees the file for the first time, and wants to get a general idea about what's going on.
 People read things from top to bottom, so place most important things first.
@@ -143,7 +143,7 @@ struct Foo {
 }
 ```
 
-## Variable Naming
+# Variable Naming
 
 Use boring and long names for local variables ([yay code completion](https://github.com/rust-analyzer/rust-analyzer/pull/4162#discussion_r417130973)).
 The default name is a lowercased name of the type: `global_state: GlobalState`.
@@ -151,12 +151,12 @@ Avoid ad-hoc acronyms and contractions, but use the ones that exist consistently
 The default name for "result of the function" local variable is `res`.
 The default name for "I don't really care about the name" variable is `it`.
 
-## Collection types
+# Collection types
 
 Prefer `rustc_hash::FxHashMap` and `rustc_hash::FxHashSet` instead of the ones in `std::collections`.
 They use a hasher that's slightly faster and using them consistently will reduce code size by some small amount.
 
-## Preconditions
+# Preconditions
 
 Express function preconditions in types and force the caller to provide them (rather than checking in callee):
 
@@ -176,7 +176,7 @@ fn frobnicate(walrus: Option<Walrus>) {
 }
 ```
 
-## Premature Pessimization
+# Premature Pessimization
 
 Avoid writing code which is slower than it needs to be.
 Don't allocate a `Vec` where an iterator would do, don't allocate strings needlessly.
@@ -197,12 +197,12 @@ if words.len() != 2 {
 }
 ```
 
-## Documentation
+# Documentation
 
 For `.md` and `.adoc` files, prefer a sentence-per-line format, don't wrap lines.
 If the line is too long, you want to split the sentence in two :-)
 
-## Commit Style
+# Commit Style
 
 We don't have specific rules around git history hygiene.
 Maintaining clean git history is encouraged, but not enforced.
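An aside on the "Preconditions" rule visible in the context lines above (express function preconditions in types and force the caller to provide them): the sketch below shows the contrast that guideline describes. The `Walrus` type and the function bodies are hypothetical stand-ins, not the exact snippet from style.md.

    // Hypothetical illustration of the guideline; `Walrus` is a stand-in type.
    struct Walrus;

    // Precondition in the type: the caller must produce a Walrus to call this.
    fn frobnicate(walrus: Walrus) {
        let _ = walrus;
    }

    // Precondition checked in the callee: a missing Walrus is silently tolerated,
    // which is what the guideline advises against.
    fn frobnicate_lenient(walrus: Option<Walrus>) {
        if let Some(walrus) = walrus {
            let _ = walrus;
        }
    }

    fn main() {
        frobnicate(Walrus);
        frobnicate_lenient(None);
    }

Taking `Walrus` by value turns an absent walrus into a compile-time error at the call site instead of a silently skipped branch inside the callee.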
diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml
index 1a1140b04..e9edbdd10 100644
--- a/xtask/Cargo.toml
+++ b/xtask/Cargo.toml
@@ -18,3 +18,4 @@ quote = "1.0.2"
 ungrammar = "1.1.1"
 walkdir = "2.3.1"
 write-json = "0.1.0"
+# Avoid adding more dependencies to this crate