author      bors[bot] <26634292+bors[bot]@users.noreply.github.com>    2019-09-24 07:28:40 +0100
committer   GitHub <[email protected]>    2019-09-24 07:28:40 +0100
commit      4c293c0a57fbe91587f6517403c30bb61ac21dc5 (patch)
tree        e7755c4113e079ae6558d2ad2b5e0718ea5c7284
parent      c12a713739e30019497ab0f5e7bfa776122bfc6d (diff)
parent      66101e931c641b7d96dcfdbb83838130eab588bc (diff)
Merge #1902
1902: simplify r=matklad a=matklad
Co-authored-by: Aleksey Kladov <[email protected]>
-rw-r--r--  crates/ra_mbe/src/subtree_source.rs             10
-rw-r--r--  crates/ra_parser/src/syntax_kind/generated.rs     5
-rw-r--r--  crates/ra_syntax/src/grammar.ron                   5
-rw-r--r--  crates/ra_tools/src/boilerplate_gen.rs            44

4 files changed, 25 insertions, 39 deletions
diff --git a/crates/ra_mbe/src/subtree_source.rs b/crates/ra_mbe/src/subtree_source.rs
index 9d6d0133f..cf7458905 100644
--- a/crates/ra_mbe/src/subtree_source.rs
+++ b/crates/ra_mbe/src/subtree_source.rs
@@ -148,15 +148,7 @@ fn convert_ident(ident: &tt::Ident) -> TtToken {
 }
 
 fn convert_punct(p: tt::Punct) -> TtToken {
-    let kind = match p.char {
-        // lexer may produce compound tokens for these ones
-        '.' => T![.],
-        ':' => T![:],
-        '=' => T![=],
-        '!' => T![!],
-        '-' => T![-],
-        c => SyntaxKind::from_char(c).unwrap(),
-    };
+    let kind = SyntaxKind::from_char(p.char).unwrap();
     let text = {
         let mut buf = [0u8; 4];
         let s: &str = p.char.encode_utf8(&mut buf);
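The removed match arms existed because `SyntaxKind::from_char` did not cover '.', ':', '=', '!' and '-'; once the generated lookup handles them (see the `generated.rs` hunk below), a single call suffices. A minimal standalone sketch of the idea, using a hypothetical string-returning stand-in for `SyntaxKind::from_char` so it compiles on its own:

// Hypothetical stand-in for SyntaxKind::from_char, returning token names
// instead of SyntaxKind values so the sketch is self-contained.
fn from_char(c: char) -> Option<&'static str> {
    match c {
        ';' => Some("SEMI"),
        '.' => Some("DOT"),
        ':' => Some("COLON"),
        '=' => Some("EQ"),
        '!' => Some("EXCL"),
        '-' => Some("MINUS"),
        _ => None,
    }
}

fn main() {
    // Each of these characters previously needed a dedicated arm in
    // convert_punct; after the change one from_char lookup covers them all.
    for c in ['.', ':', '=', '!', '-'] {
        println!("{c} -> {}", from_char(c).unwrap());
    }
}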
diff --git a/crates/ra_parser/src/syntax_kind/generated.rs b/crates/ra_parser/src/syntax_kind/generated.rs
index 23eb3c9cb..8b43d93fe 100644
--- a/crates/ra_parser/src/syntax_kind/generated.rs
+++ b/crates/ra_parser/src/syntax_kind/generated.rs
@@ -342,6 +342,11 @@ impl SyntaxKind {
             '^' => CARET,
             '%' => PERCENT,
             '_' => UNDERSCORE,
+            '.' => DOT,
+            ':' => COLON,
+            '=' => EQ,
+            '!' => EXCL,
+            '-' => MINUS,
             _ => return None,
         };
         Some(tok)
diff --git a/crates/ra_syntax/src/grammar.ron b/crates/ra_syntax/src/grammar.ron
index da9de2214..5f395501a 100644
--- a/crates/ra_syntax/src/grammar.ron
+++ b/crates/ra_syntax/src/grammar.ron
@@ -1,7 +1,7 @@
 // Stores definitions which must be used in multiple places
 // See `cargo gen-syntax` (defined in crates/tools/src/main.rs)
 Grammar(
-    single_byte_tokens: [
+    punct: [
         (";", "SEMI"),
         (",", "COMMA"),
         ("(", "L_PAREN"),
@@ -25,9 +25,6 @@ Grammar(
         ("^", "CARET"),
         ("%", "PERCENT"),
         ("_", "UNDERSCORE"),
-    ],
-    // Tokens for which the longest match must be chosen (e.g. `..` is a DOTDOT, but `.` is a DOT)
-    multi_byte_tokens: [
         (".", "DOT"),
         ("..", "DOTDOT"),
         ("...", "DOTDOTDOT"),
diff --git a/crates/ra_tools/src/boilerplate_gen.rs b/crates/ra_tools/src/boilerplate_gen.rs
index 578f13a3e..1d112c0af 100644
--- a/crates/ra_tools/src/boilerplate_gen.rs
+++ b/crates/ra_tools/src/boilerplate_gen.rs
@@ -160,31 +160,24 @@ fn generate_ast(grammar: &Grammar) -> Result<String> {
 }
 
 fn generate_syntax_kinds(grammar: &Grammar) -> Result<String> {
-    let single_byte_tokens_values =
-        grammar.single_byte_tokens.iter().map(|(token, _name)| token.chars().next().unwrap());
-    let single_byte_tokens = grammar
-        .single_byte_tokens
-        .iter()
-        .map(|(_token, name)| format_ident!("{}", name))
-        .collect::<Vec<_>>();
-
-    let punctuation_values =
-        grammar.single_byte_tokens.iter().chain(grammar.multi_byte_tokens.iter()).map(
-            |(token, _name)| {
-                if "{}[]()".contains(token) {
-                    let c = token.chars().next().unwrap();
-                    quote! { #c }
-                } else {
-                    let cs = token.chars().map(|c| Punct::new(c, Spacing::Joint));
-                    quote! { #(#cs)* }
-                }
-            },
-        );
-    let punctuation = single_byte_tokens
-        .clone()
-        .into_iter()
-        .chain(grammar.multi_byte_tokens.iter().map(|(_token, name)| format_ident!("{}", name)))
-        .collect::<Vec<_>>();
+    let (single_byte_tokens_values, single_byte_tokens): (Vec<_>, Vec<_>) = grammar
+        .punct
+        .iter()
+        .filter(|(token, _name)| token.len() == 1)
+        .map(|(token, name)| (token.chars().next().unwrap(), format_ident!("{}", name)))
+        .unzip();
+
+    let punctuation_values = grammar.punct.iter().map(|(token, _name)| {
+        if "{}[]()".contains(token) {
+            let c = token.chars().next().unwrap();
+            quote! { #c }
+        } else {
+            let cs = token.chars().map(|c| Punct::new(c, Spacing::Joint));
+            quote! { #(#cs)* }
+        }
+    });
+    let punctuation =
+        grammar.punct.iter().map(|(_token, name)| format_ident!("{}", name)).collect::<Vec<_>>();
 
     let full_keywords_values = &grammar.keywords;
     let full_keywords =
@@ -294,8 +287,7 @@ fn reformat(text: impl std::fmt::Display) -> Result<String> {
 
 #[derive(Deserialize, Debug)]
 struct Grammar {
-    single_byte_tokens: Vec<(String, String)>,
-    multi_byte_tokens: Vec<(String, String)>,
+    punct: Vec<(String, String)>,
     keywords: Vec<String>,
     contextual_keywords: Vec<String>,
     literals: Vec<String>,
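The heart of the generator change is that the single-byte subset is now derived from the unified punct list by filtering on token length and unzipping, rather than being kept as a separate single_byte_tokens array. A minimal standalone sketch of that pattern, with plain strings standing in for the format_ident!-generated identifiers and illustrative variable names:

fn main() {
    // A small slice of the unified punct list from grammar.ron.
    let punct: Vec<(&str, &str)> = vec![
        (";", "SEMI"),
        (",", "COMMA"),
        (".", "DOT"),
        ("..", "DOTDOT"),
        ("=", "EQ"),
        ("==", "EQEQ"),
    ];

    // Mirrors the filter + map + unzip in generate_syntax_kinds: the
    // single-byte tokens are exactly the entries whose text is one byte long.
    let (single_byte_values, single_byte_names): (Vec<char>, Vec<&str>) = punct
        .iter()
        .filter(|(token, _name)| token.len() == 1)
        .map(|(token, name)| (token.chars().next().unwrap(), *name))
        .unzip();

    assert_eq!(single_byte_values, [';', ',', '.', '=']);
    assert_eq!(single_byte_names, ["SEMI", "COMMA", "DOT", "EQ"]);
    println!("{single_byte_values:?} -> {single_byte_names:?}");
}

With this in place the old multi_byte_tokens list, and the comment about longest-match tokens, carry no extra information: whether a token is multi-byte falls out of its length.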