diff options
author | Veetaha <[email protected]> | 2020-01-28 05:09:13 +0000 |
---|---|---|
committer | Veetaha <[email protected]> | 2020-02-03 22:00:55 +0000 |
commit | 9e7eaa959f9dc368a55f1a80b35651b78b3d0883 (patch) | |
tree | 4b1f4af14d9898301949fa937219006d671a72ef /crates/ra_syntax/src/parsing/reparsing.rs | |
parent | bf60661aa3e2a77fedb3e1627675842d05538860 (diff) |
ra_syntax: refactored the lexer design as per @matklad and @kiljacken PR review
Diffstat (limited to 'crates/ra_syntax/src/parsing/reparsing.rs')
-rw-r--r-- | crates/ra_syntax/src/parsing/reparsing.rs | 25 |
1 file changed, 13 insertions, 12 deletions
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs index ad1a7c855..1f351e9fc 100644 --- a/crates/ra_syntax/src/parsing/reparsing.rs +++ b/crates/ra_syntax/src/parsing/reparsing.rs | |||
@@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit; | |||
12 | use crate::{ | 12 | use crate::{ |
13 | algo, | 13 | algo, |
14 | parsing::{ | 14 | parsing::{ |
15 | lexer::{single_token, tokenize, ParsedTokens, Token}, | 15 | lexer::{lex_single_syntax_kind, tokenize, Token}, |
16 | text_token_source::TextTokenSource, | 16 | text_token_source::TextTokenSource, |
17 | text_tree_sink::TextTreeSink, | 17 | text_tree_sink::TextTreeSink, |
18 | }, | 18 | }, |
@@ -54,7 +54,7 @@ fn reparse_token<'node>( | |||
54 | } | 54 | } |
55 | 55 | ||
56 | let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit); | 56 | let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit); |
57 | let new_token_kind = single_token(&new_text)?.token.kind; | 57 | let (new_token_kind, _error) = lex_single_syntax_kind(&new_text)?; |
58 | 58 | ||
59 | if new_token_kind != prev_token_kind | 59 | if new_token_kind != prev_token_kind |
60 | || (new_token_kind == IDENT && is_contextual_kw(&new_text)) | 60 | || (new_token_kind == IDENT && is_contextual_kw(&new_text)) |
@@ -67,8 +67,8 @@ fn reparse_token<'node>( | |||
67 | // `b` no longer remains an identifier, but becomes a part of byte string literal | 67 | // `b` no longer remains an identifier, but becomes a part of byte string literal |
68 | if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) { | 68 | if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) { |
69 | new_text.push(next_char); | 69 | new_text.push(next_char); |
70 | let token_with_next_char = single_token(&new_text); | 70 | let token_with_next_char = lex_single_syntax_kind(&new_text); |
71 | if token_with_next_char.is_some() { | 71 | if let Some((_kind, _error)) = token_with_next_char { |
72 | return None; | 72 | return None; |
73 | } | 73 | } |
74 | new_text.pop(); | 74 | new_text.pop(); |
@@ -88,23 +88,26 @@ fn reparse_block<'node>( | |||
88 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { | 88 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { |
89 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; | 89 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; |
90 | let text = get_text_after_edit(node.clone().into(), &edit); | 90 | let text = get_text_after_edit(node.clone().into(), &edit); |
91 | let ParsedTokens { tokens, errors } = tokenize(&text); | 91 | |
92 | let (tokens, new_lexer_errors) = tokenize(&text); | ||
92 | if !is_balanced(&tokens) { | 93 | if !is_balanced(&tokens) { |
93 | return None; | 94 | return None; |
94 | } | 95 | } |
96 | |||
95 | let mut token_source = TextTokenSource::new(&text, &tokens); | 97 | let mut token_source = TextTokenSource::new(&text, &tokens); |
96 | let mut tree_sink = TextTreeSink::new(&text, &tokens, errors); | 98 | let mut tree_sink = TextTreeSink::new(&text, &tokens); |
97 | reparser.parse(&mut token_source, &mut tree_sink); | 99 | reparser.parse(&mut token_source, &mut tree_sink); |
98 | let (green, new_errors) = tree_sink.finish(); | 100 | |
99 | Some((node.replace_with(green), new_errors, node.text_range())) | 101 | let (green, mut new_parser_errors) = tree_sink.finish(); |
102 | new_parser_errors.extend(new_lexer_errors); | ||
103 | |||
104 | Some((node.replace_with(green), new_parser_errors, node.text_range())) | ||
100 | } | 105 | } |
101 | 106 | ||
102 | fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String { | 107 | fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String { |
103 | let edit = | 108 | let edit = |
104 | AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone()); | 109 | AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone()); |
105 | 110 | ||
106 | // Note: we could move this match to a method or even further: use enum_dispatch crate | ||
107 | // https://crates.io/crates/enum_dispatch | ||
108 | let text = match element { | 111 | let text = match element { |
109 | NodeOrToken::Token(token) => token.text().to_string(), | 112 | NodeOrToken::Token(token) => token.text().to_string(), |
110 | NodeOrToken::Node(node) => node.text().to_string(), | 113 | NodeOrToken::Node(node) => node.text().to_string(), |
@@ -122,8 +125,6 @@ fn is_contextual_kw(text: &str) -> bool { | |||
122 | fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> { | 125 | fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> { |
123 | let node = algo::find_covering_element(node, range); | 126 | let node = algo::find_covering_element(node, range); |
124 | 127 | ||
125 | // Note: we could move this match to a method or even further: use enum_dispatch crate | ||
126 | // https://crates.io/crates/enum_dispatch | ||
127 | let mut ancestors = match node { | 128 | let mut ancestors = match node { |
128 | NodeOrToken::Token(it) => it.parent().ancestors(), | 129 | NodeOrToken::Token(it) => it.parent().ancestors(), |
129 | NodeOrToken::Node(it) => it.ancestors(), | 130 | NodeOrToken::Node(it) => it.ancestors(), |