author | bors[bot] <26634292+bors[bot]@users.noreply.github.com> | 2020-02-03 22:51:17 +0000 |
---|---|---|
committer | GitHub <[email protected]> | 2020-02-03 22:51:17 +0000 |
commit | 918547dbe9a2907401102eba491ac25cebe1404d (patch) | |
tree | e0aa3bdcec597e81f022ac1ce388d42724a92f51 /crates/ra_syntax/src | |
parent | b090ee5a65f9630146c2842bc51fcfcc8da08da1 (diff) | |
parent | a3e5663ae0206270156fbeb926a174a40abbddb0 (diff) |
Merge #2911
2911: Implement collecting errors while tokenizing r=matklad a=Veetaha
Now we are collecting errors from `rustc_lexer` and returning them in `ParsedToken { token, error }` and `ParsedTokens { tokens, errors }` structures **([UPD]: this is now simplified, see updates below)**.
The main changes are introduced in `ra_syntax/parsing/lexer.rs`. It now exposes the following functions and types:
```rust
pub fn tokenize(text: &str) -> ParsedTokens;
pub fn tokenize_append(text: &str, parsed_tokens_to_append_to: &mut ParsedTokens);
pub fn first_token(text: &str) -> Option<ParsedToken>; // allows any number of tokens in text
pub fn single_token(text: &str) -> Option<ParsedToken>; // allows only a single token in text
pub struct ParsedToken { pub token: Token, pub error: Option<SyntaxError> }
pub struct ParsedTokens { pub tokens: Vec<Token>, pub errors: Vec<SyntaxError> }
pub enum TokenizeError { /* Simple enum which reflects rustc_lexer tokenization errors */ }
```
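For illustration, a caller of this original API might have looked like the following (a hypothetical sketch, not code from this PR; it assumes `tokenize` and `ParsedTokens` are re-exported from the crate root):
```rust
// Hypothetical caller of the original `ParsedTokens` API; this shape was
// later superseded by the simplified tuple-returning API described below.
use ra_syntax::{tokenize, ParsedTokens};

fn dump_lex_result(text: &str) {
    let ParsedTokens { tokens, errors } = tokenize(text);
    // Every byte of the input is covered by some token, even for broken input.
    for token in &tokens {
        println!("{:?} {:?}", token.kind, token.len);
    }
    // Errors are reported on the side instead of aborting the lexing.
    for error in &errors {
        eprintln!("lexer error: {}", error);
    }
}
```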
In the first commit I implemented it with iterators, but then decided that, since this crate is purpose-built for `rust-analyzer` and all of its usage sites are clearly visible, it would be better to simplify it to vectors.
This is currently WIP because I want to add tests for the error messages generated by the lexer.
I'd like to hear your thoughts on how to define these tests in the `ra_syntax/test-data` dir.
Related issues: #223
**[UPD]**
After the PR review, the API was simplified:
```rust
pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>);
// Neither lex function checks for unescape errors
pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)>;
pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind>;
// This will be removed in the next PR in favour of simplifying `SyntaxError` to `(String, TextRange)`
pub enum TokenizeError { /* Simple enum which reflects rustc_lexer tokenization errors */ }
// this is private, but may be made public if demand arises in the future (principle of least privilege)
fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)>;
```
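A minimal usage sketch of the simplified API (assuming the crate-root re-exports added in the `lib.rs` hunk below):
```rust
use ra_syntax::{lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, SyntaxKind};

fn main() {
    // Lexing never fails outright: errors are returned alongside the tokens.
    let (tokens, errors) = tokenize("let x = 0x;");
    assert!(!tokens.is_empty());
    // `0x` is an integer literal with no digits after the base prefix,
    // so a `TokenizeError::EmptyInt` is reported for it.
    assert!(!errors.is_empty());

    // Exactly one well-formed token => Some(kind); anything else => None.
    assert_eq!(lex_single_valid_syntax_kind("struct"), Some(SyntaxKind::STRUCT_KW));
    assert_eq!(lex_single_valid_syntax_kind("struct Foo"), None);

    // The error-tolerant variant also reports the kind of a malformed token.
    let (kind, error) = lex_single_syntax_kind("0b").unwrap();
    assert_eq!(kind, SyntaxKind::INT_NUMBER);
    assert!(error.is_some());
}
```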
Co-authored-by: Veetaha <[email protected]>
Diffstat (limited to 'crates/ra_syntax/src')
-rw-r--r-- | crates/ra_syntax/src/algo.rs | 2 |
-rw-r--r-- | crates/ra_syntax/src/lib.rs | 4 |
-rw-r--r-- | crates/ra_syntax/src/parsing.rs | 18 |
-rw-r--r-- | crates/ra_syntax/src/parsing/lexer.rs | 324 |
-rw-r--r-- | crates/ra_syntax/src/parsing/reparsing.rs | 55 |
-rw-r--r-- | crates/ra_syntax/src/parsing/text_tree_sink.rs | 4 |
-rw-r--r-- | crates/ra_syntax/src/syntax_error.rs | 60 |
-rw-r--r-- | crates/ra_syntax/src/syntax_node.rs | 9 |
-rw-r--r-- | crates/ra_syntax/src/tests.rs | 79 |
-rw-r--r-- | crates/ra_syntax/src/validation.rs | 6 |
10 files changed, 425 insertions, 136 deletions
diff --git a/crates/ra_syntax/src/algo.rs b/crates/ra_syntax/src/algo.rs
index 30a479f01..acf677e7d 100644
--- a/crates/ra_syntax/src/algo.rs
+++ b/crates/ra_syntax/src/algo.rs
@@ -81,7 +81,7 @@ impl TreeDiff { | |||
81 | /// Specifically, returns a map whose keys are descendants of `from` and values | 81 | /// Specifically, returns a map whose keys are descendants of `from` and values |
82 | /// are descendants of `to`, such that `replace_descendants(from, map) == to`. | 82 | /// are descendants of `to`, such that `replace_descendants(from, map) == to`. |
83 | /// | 83 | /// |
84 | /// A trivial solution is a singletom map `{ from: to }`, but this function | 84 | /// A trivial solution is a singleton map `{ from: to }`, but this function |
85 | /// tries to find a more fine-grained diff. | 85 | /// tries to find a more fine-grained diff. |
86 | pub fn diff(from: &SyntaxNode, to: &SyntaxNode) -> TreeDiff { | 86 | pub fn diff(from: &SyntaxNode, to: &SyntaxNode) -> TreeDiff { |
87 | let mut buf = FxHashMap::default(); | 87 | let mut buf = FxHashMap::default(); |
diff --git a/crates/ra_syntax/src/lib.rs b/crates/ra_syntax/src/lib.rs
index 9931fec84..f8f4b64c1 100644
--- a/crates/ra_syntax/src/lib.rs
+++ b/crates/ra_syntax/src/lib.rs
@@ -41,7 +41,9 @@ use crate::syntax_node::GreenNode; | |||
41 | pub use crate::{ | 41 | pub use crate::{ |
42 | algo::InsertPosition, | 42 | algo::InsertPosition, |
43 | ast::{AstNode, AstToken}, | 43 | ast::{AstNode, AstToken}, |
44 | parsing::{classify_literal, tokenize, Token}, | 44 | parsing::{ |
45 | lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, Token, TokenizeError, | ||
46 | }, | ||
45 | ptr::{AstPtr, SyntaxNodePtr}, | 47 | ptr::{AstPtr, SyntaxNodePtr}, |
46 | syntax_error::{Location, SyntaxError, SyntaxErrorKind}, | 48 | syntax_error::{Location, SyntaxError, SyntaxErrorKind}, |
47 | syntax_node::{ | 49 | syntax_node::{ |
diff --git a/crates/ra_syntax/src/parsing.rs b/crates/ra_syntax/src/parsing.rs
index 0387f0378..e5eb80850 100644
--- a/crates/ra_syntax/src/parsing.rs
+++ b/crates/ra_syntax/src/parsing.rs
@@ -7,15 +7,23 @@ mod text_tree_sink; | |||
7 | mod reparsing; | 7 | mod reparsing; |
8 | 8 | ||
9 | use crate::{syntax_node::GreenNode, SyntaxError}; | 9 | use crate::{syntax_node::GreenNode, SyntaxError}; |
10 | use text_token_source::TextTokenSource; | ||
11 | use text_tree_sink::TextTreeSink; | ||
10 | 12 | ||
11 | pub use self::lexer::{classify_literal, tokenize, Token}; | 13 | pub use lexer::*; |
12 | 14 | ||
13 | pub(crate) use self::reparsing::incremental_reparse; | 15 | pub(crate) use self::reparsing::incremental_reparse; |
14 | 16 | ||
15 | pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) { | 17 | pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) { |
16 | let tokens = tokenize(&text); | 18 | let (tokens, lexer_errors) = tokenize(&text); |
17 | let mut token_source = text_token_source::TextTokenSource::new(text, &tokens); | 19 | |
18 | let mut tree_sink = text_tree_sink::TextTreeSink::new(text, &tokens); | 20 | let mut token_source = TextTokenSource::new(text, &tokens); |
21 | let mut tree_sink = TextTreeSink::new(text, &tokens); | ||
22 | |||
19 | ra_parser::parse(&mut token_source, &mut tree_sink); | 23 | ra_parser::parse(&mut token_source, &mut tree_sink); |
20 | tree_sink.finish() | 24 | |
25 | let (tree, mut parser_errors) = tree_sink.finish(); | ||
26 | parser_errors.extend(lexer_errors); | ||
27 | |||
28 | (tree, parser_errors) | ||
21 | } | 29 | } |
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 6d839208d..f889e6a1d 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -1,8 +1,10 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! The lexer analyzes a raw input string and produces lexemes (tokens). |
2 | //! It is just a bridge to `rustc_lexer`. | ||
2 | 3 | ||
3 | use crate::{ | 4 | use crate::{ |
5 | SyntaxError, SyntaxErrorKind, | ||
4 | SyntaxKind::{self, *}, | 6 | SyntaxKind::{self, *}, |
5 | TextUnit, | 7 | TextRange, TextUnit, |
6 | }; | 8 | }; |
7 | 9 | ||
8 | /// A token of Rust source. | 10 | /// A token of Rust source. |
@@ -14,91 +16,261 @@ pub struct Token { | |||
14 | pub len: TextUnit, | 16 | pub len: TextUnit, |
15 | } | 17 | } |
16 | 18 | ||
17 | fn match_literal_kind(kind: rustc_lexer::LiteralKind) -> SyntaxKind { | 19 | /// Break a string up into its component tokens. |
18 | match kind { | 20 | /// Beware that it checks for a shebang first, and its length contributes to the |
19 | rustc_lexer::LiteralKind::Int { .. } => INT_NUMBER, | 21 | /// resulting tokens' offsets. |
20 | rustc_lexer::LiteralKind::Float { .. } => FLOAT_NUMBER, | 22 | pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) { |
21 | rustc_lexer::LiteralKind::Char { .. } => CHAR, | 23 | // non-empty string is a precondition of `rustc_lexer::strip_shebang()`. |
22 | rustc_lexer::LiteralKind::Byte { .. } => BYTE, | 24 | if text.is_empty() { |
23 | rustc_lexer::LiteralKind::Str { .. } => STRING, | 25 | return Default::default(); |
24 | rustc_lexer::LiteralKind::ByteStr { .. } => BYTE_STRING, | 26 | } |
25 | rustc_lexer::LiteralKind::RawStr { .. } => RAW_STRING, | 27 | |
26 | rustc_lexer::LiteralKind::RawByteStr { .. } => RAW_BYTE_STRING, | 28 | let mut tokens = Vec::new(); |
29 | let mut errors = Vec::new(); | ||
30 | |||
31 | let mut offset: usize = rustc_lexer::strip_shebang(text) | ||
32 | .map(|shebang_len| { | ||
33 | tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) }); | ||
34 | shebang_len | ||
35 | }) | ||
36 | .unwrap_or(0); | ||
37 | |||
38 | let text_without_shebang = &text[offset..]; | ||
39 | |||
40 | for rustc_token in rustc_lexer::tokenize(text_without_shebang) { | ||
41 | let token_len = TextUnit::from_usize(rustc_token.len); | ||
42 | let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len); | ||
43 | |||
44 | let (syntax_kind, error) = | ||
45 | rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]); | ||
46 | |||
47 | tokens.push(Token { kind: syntax_kind, len: token_len }); | ||
48 | |||
49 | if let Some(error) = error { | ||
50 | errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)); | ||
51 | } | ||
52 | |||
53 | offset += rustc_token.len; | ||
27 | } | 54 | } |
55 | |||
56 | (tokens, errors) | ||
57 | } | ||
58 | |||
59 | /// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token | ||
60 | /// encountered at the beginning of the string. | ||
61 | /// | ||
62 | /// Returns `None` if the string contains zero *or two or more* tokens. | ||
63 | /// The token is malformed if the returned error is not `None`. | ||
64 | /// | ||
65 | /// Beware that unescape errors are not checked at tokenization time. | ||
66 | pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> { | ||
67 | lex_first_token(text) | ||
68 | .filter(|(token, _)| token.len.to_usize() == text.len()) | ||
69 | .map(|(token, error)| (token.kind, error)) | ||
70 | } | ||
71 | |||
72 | /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and | ||
73 | /// returns `None` if any tokenization error occurred. | ||
74 | /// | ||
75 | /// Beware that unescape errors are not checked at tokenization time. | ||
76 | pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> { | ||
77 | lex_first_token(text) | ||
78 | .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len()) | ||
79 | .map(|(token, _error)| token.kind) | ||
28 | } | 80 | } |
29 | 81 | ||
30 | /// Break a string up into its component tokens | 82 | /// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token |
31 | pub fn tokenize(text: &str) -> Vec<Token> { | 83 | /// encountered at the beginning of the string. |
84 | /// | ||
85 | /// Returns `None` if the string contains zero tokens or if the token was parsed | ||
86 | /// with an error. | ||
87 | /// The token is malformed if the returned error is not `None`. | ||
88 | /// | ||
89 | /// Beware that unescape errors are not checked at tokenization time. | ||
90 | fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> { | ||
91 | // non-empty string is a precondition of `rustc_lexer::first_token()`. | ||
32 | if text.is_empty() { | 92 | if text.is_empty() { |
33 | return vec![]; | 93 | return None; |
34 | } | ||
35 | let mut text = text; | ||
36 | let mut acc = Vec::new(); | ||
37 | if let Some(len) = rustc_lexer::strip_shebang(text) { | ||
38 | acc.push(Token { kind: SHEBANG, len: TextUnit::from_usize(len) }); | ||
39 | text = &text[len..]; | ||
40 | } | 94 | } |
41 | while !text.is_empty() { | 95 | |
42 | let rustc_token = rustc_lexer::first_token(text); | 96 | let rustc_token = rustc_lexer::first_token(text); |
43 | let kind = match rustc_token.kind { | 97 | let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text); |
44 | rustc_lexer::TokenKind::LineComment => COMMENT, | 98 | |
45 | rustc_lexer::TokenKind::BlockComment { .. } => COMMENT, | 99 | let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) }; |
46 | rustc_lexer::TokenKind::Whitespace => WHITESPACE, | 100 | let error = error.map(|error| { |
47 | rustc_lexer::TokenKind::Ident => { | 101 | SyntaxError::new( |
48 | let token_text = &text[..rustc_token.len]; | 102 | SyntaxErrorKind::TokenizeError(error), |
103 | TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)), | ||
104 | ) | ||
105 | }); | ||
106 | |||
107 | Some((token, error)) | ||
108 | } | ||
109 | |||
110 | // FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad advice: | ||
111 | // https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067 | ||
112 | |||
113 | /// Describes the values of the `SyntaxErrorKind::TokenizeError` enum variant. | ||
114 | /// It describes all the types of errors that may happen during the tokenization | ||
115 | /// of Rust source. | ||
116 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] | ||
117 | pub enum TokenizeError { | ||
118 | /// Base prefix was provided, but there were no digits | ||
119 | /// after it, e.g. `0x`, `0b`. | ||
120 | EmptyInt, | ||
121 | /// Float exponent lacks digits e.g. `12.34e+`, `12.3E+`, `12e-`, `1_E-`, | ||
122 | EmptyExponent, | ||
123 | |||
124 | /// Block comment lacks trailing delimiter `*/` | ||
125 | UnterminatedBlockComment, | ||
126 | /// Character literal lacks trailing delimiter `'` | ||
127 | UnterminatedChar, | ||
128 | /// Characterish byte literal lacks trailing delimiter `'` | ||
129 | UnterminatedByte, | ||
130 | /// String literal lacks trailing delimiter `"` | ||
131 | UnterminatedString, | ||
132 | /// Byte string literal lacks trailing delimiter `"` | ||
133 | UnterminatedByteString, | ||
134 | /// Raw literal lacks trailing delimiter e.g. `"##` | ||
135 | UnterminatedRawString, | ||
136 | /// Raw byte string literal lacks trailing delimiter e.g. `"##` | ||
137 | UnterminatedRawByteString, | ||
138 | |||
139 | /// Raw string lacks a quote after the pound characters e.g. `r###` | ||
140 | UnstartedRawString, | ||
141 | /// Raw byte string lacks a quote after the pound characters e.g. `br###` | ||
142 | UnstartedRawByteString, | ||
143 | |||
144 | /// Lifetime starts with a number e.g. `'4ever` | ||
145 | LifetimeStartsWithNumber, | ||
146 | } | ||
147 | |||
148 | fn rustc_token_kind_to_syntax_kind( | ||
149 | rustc_token_kind: &rustc_lexer::TokenKind, | ||
150 | token_text: &str, | ||
151 | ) -> (SyntaxKind, Option<TokenizeError>) { | ||
152 | // A note on an intended tradeoff: | ||
153 | // We drop some useful information here (see the patterns with double dots `..`). | ||
154 | // Storing that info in `SyntaxKind` is not possible due to its layout requirement of | ||
155 | // being a `u16`, which comes from `rowan::SyntaxKind`. | ||
156 | |||
157 | let syntax_kind = { | ||
158 | use rustc_lexer::TokenKind as TK; | ||
159 | use TokenizeError as TE; | ||
160 | |||
161 | match rustc_token_kind { | ||
162 | TK::LineComment => COMMENT, | ||
163 | |||
164 | TK::BlockComment { terminated: true } => COMMENT, | ||
165 | TK::BlockComment { terminated: false } => { | ||
166 | return (COMMENT, Some(TE::UnterminatedBlockComment)); | ||
167 | } | ||
168 | |||
169 | TK::Whitespace => WHITESPACE, | ||
170 | |||
171 | TK::Ident => { | ||
49 | if token_text == "_" { | 172 | if token_text == "_" { |
50 | UNDERSCORE | 173 | UNDERSCORE |
51 | } else { | 174 | } else { |
52 | SyntaxKind::from_keyword(&text[..rustc_token.len]).unwrap_or(IDENT) | 175 | SyntaxKind::from_keyword(token_text).unwrap_or(IDENT) |
53 | } | 176 | } |
54 | } | 177 | } |
55 | rustc_lexer::TokenKind::RawIdent => IDENT, | 178 | |
56 | rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind), | 179 | TK::RawIdent => IDENT, |
57 | rustc_lexer::TokenKind::Lifetime { .. } => LIFETIME, | 180 | TK::Literal { kind, .. } => return match_literal_kind(&kind), |
58 | rustc_lexer::TokenKind::Semi => SEMI, | 181 | |
59 | rustc_lexer::TokenKind::Comma => COMMA, | 182 | TK::Lifetime { starts_with_number: false } => LIFETIME, |
60 | rustc_lexer::TokenKind::Dot => DOT, | 183 | TK::Lifetime { starts_with_number: true } => { |
61 | rustc_lexer::TokenKind::OpenParen => L_PAREN, | 184 | return (LIFETIME, Some(TE::LifetimeStartsWithNumber)) |
62 | rustc_lexer::TokenKind::CloseParen => R_PAREN, | 185 | } |
63 | rustc_lexer::TokenKind::OpenBrace => L_CURLY, | 186 | |
64 | rustc_lexer::TokenKind::CloseBrace => R_CURLY, | 187 | TK::Semi => SEMI, |
65 | rustc_lexer::TokenKind::OpenBracket => L_BRACK, | 188 | TK::Comma => COMMA, |
66 | rustc_lexer::TokenKind::CloseBracket => R_BRACK, | 189 | TK::Dot => DOT, |
67 | rustc_lexer::TokenKind::At => AT, | 190 | TK::OpenParen => L_PAREN, |
68 | rustc_lexer::TokenKind::Pound => POUND, | 191 | TK::CloseParen => R_PAREN, |
69 | rustc_lexer::TokenKind::Tilde => TILDE, | 192 | TK::OpenBrace => L_CURLY, |
70 | rustc_lexer::TokenKind::Question => QUESTION, | 193 | TK::CloseBrace => R_CURLY, |
71 | rustc_lexer::TokenKind::Colon => COLON, | 194 | TK::OpenBracket => L_BRACK, |
72 | rustc_lexer::TokenKind::Dollar => DOLLAR, | 195 | TK::CloseBracket => R_BRACK, |
73 | rustc_lexer::TokenKind::Eq => EQ, | 196 | TK::At => AT, |
74 | rustc_lexer::TokenKind::Not => EXCL, | 197 | TK::Pound => POUND, |
75 | rustc_lexer::TokenKind::Lt => L_ANGLE, | 198 | TK::Tilde => TILDE, |
76 | rustc_lexer::TokenKind::Gt => R_ANGLE, | 199 | TK::Question => QUESTION, |
77 | rustc_lexer::TokenKind::Minus => MINUS, | 200 | TK::Colon => COLON, |
78 | rustc_lexer::TokenKind::And => AMP, | 201 | TK::Dollar => DOLLAR, |
79 | rustc_lexer::TokenKind::Or => PIPE, | 202 | TK::Eq => EQ, |
80 | rustc_lexer::TokenKind::Plus => PLUS, | 203 | TK::Not => EXCL, |
81 | rustc_lexer::TokenKind::Star => STAR, | 204 | TK::Lt => L_ANGLE, |
82 | rustc_lexer::TokenKind::Slash => SLASH, | 205 | TK::Gt => R_ANGLE, |
83 | rustc_lexer::TokenKind::Caret => CARET, | 206 | TK::Minus => MINUS, |
84 | rustc_lexer::TokenKind::Percent => PERCENT, | 207 | TK::And => AMP, |
85 | rustc_lexer::TokenKind::Unknown => ERROR, | 208 | TK::Or => PIPE, |
209 | TK::Plus => PLUS, | ||
210 | TK::Star => STAR, | ||
211 | TK::Slash => SLASH, | ||
212 | TK::Caret => CARET, | ||
213 | TK::Percent => PERCENT, | ||
214 | TK::Unknown => ERROR, | ||
215 | } | ||
216 | }; | ||
217 | |||
218 | return (syntax_kind, None); | ||
219 | |||
220 | fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) { | ||
221 | use rustc_lexer::LiteralKind as LK; | ||
222 | use TokenizeError as TE; | ||
223 | |||
224 | #[rustfmt::skip] | ||
225 | let syntax_kind = match *kind { | ||
226 | LK::Int { empty_int: false, .. } => INT_NUMBER, | ||
227 | LK::Int { empty_int: true, .. } => { | ||
228 | return (INT_NUMBER, Some(TE::EmptyInt)) | ||
229 | } | ||
230 | |||
231 | LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER, | ||
232 | LK::Float { empty_exponent: true, .. } => { | ||
233 | return (FLOAT_NUMBER, Some(TE::EmptyExponent)) | ||
234 | } | ||
235 | |||
236 | LK::Char { terminated: true } => CHAR, | ||
237 | LK::Char { terminated: false } => { | ||
238 | return (CHAR, Some(TE::UnterminatedChar)) | ||
239 | } | ||
240 | |||
241 | LK::Byte { terminated: true } => BYTE, | ||
242 | LK::Byte { terminated: false } => { | ||
243 | return (BYTE, Some(TE::UnterminatedByte)) | ||
244 | } | ||
245 | |||
246 | LK::Str { terminated: true } => STRING, | ||
247 | LK::Str { terminated: false } => { | ||
248 | return (STRING, Some(TE::UnterminatedString)) | ||
249 | } | ||
250 | |||
251 | |||
252 | LK::ByteStr { terminated: true } => BYTE_STRING, | ||
253 | LK::ByteStr { terminated: false } => { | ||
254 | return (BYTE_STRING, Some(TE::UnterminatedByteString)) | ||
255 | } | ||
256 | |||
257 | LK::RawStr { started: true, terminated: true, .. } => RAW_STRING, | ||
258 | LK::RawStr { started: true, terminated: false, .. } => { | ||
259 | return (RAW_STRING, Some(TE::UnterminatedRawString)) | ||
260 | } | ||
261 | LK::RawStr { started: false, .. } => { | ||
262 | return (RAW_STRING, Some(TE::UnstartedRawString)) | ||
263 | } | ||
264 | |||
265 | LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING, | ||
266 | LK::RawByteStr { started: true, terminated: false, .. } => { | ||
267 | return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString)) | ||
268 | } | ||
269 | LK::RawByteStr { started: false, .. } => { | ||
270 | return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString)) | ||
271 | } | ||
86 | }; | 272 | }; |
87 | let token = Token { kind, len: TextUnit::from_usize(rustc_token.len) }; | ||
88 | acc.push(token); | ||
89 | text = &text[rustc_token.len..]; | ||
90 | } | ||
91 | acc | ||
92 | } | ||
93 | 273 | ||
94 | pub fn classify_literal(text: &str) -> Option<Token> { | 274 | (syntax_kind, None) |
95 | let t = rustc_lexer::first_token(text); | ||
96 | if t.len != text.len() { | ||
97 | return None; | ||
98 | } | 275 | } |
99 | let kind = match t.kind { | ||
100 | rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind), | ||
101 | _ => return None, | ||
102 | }; | ||
103 | Some(Token { kind, len: TextUnit::from_usize(t.len) }) | ||
104 | } | 276 | } |
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs
index 06bdda11d..a86da0675 100644
--- a/crates/ra_syntax/src/parsing/reparsing.rs
+++ b/crates/ra_syntax/src/parsing/reparsing.rs
@@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit; | |||
12 | use crate::{ | 12 | use crate::{ |
13 | algo, | 13 | algo, |
14 | parsing::{ | 14 | parsing::{ |
15 | lexer::{tokenize, Token}, | 15 | lexer::{lex_single_syntax_kind, tokenize, Token}, |
16 | text_token_source::TextTokenSource, | 16 | text_token_source::TextTokenSource, |
17 | text_tree_sink::TextTreeSink, | 17 | text_tree_sink::TextTreeSink, |
18 | }, | 18 | }, |
@@ -41,37 +41,42 @@ fn reparse_token<'node>( | |||
41 | root: &'node SyntaxNode, | 41 | root: &'node SyntaxNode, |
42 | edit: &AtomTextEdit, | 42 | edit: &AtomTextEdit, |
43 | ) -> Option<(GreenNode, TextRange)> { | 43 | ) -> Option<(GreenNode, TextRange)> { |
44 | let token = algo::find_covering_element(root, edit.delete).as_token()?.clone(); | 44 | let prev_token = algo::find_covering_element(root, edit.delete).as_token()?.clone(); |
45 | match token.kind() { | 45 | let prev_token_kind = prev_token.kind(); |
46 | match prev_token_kind { | ||
46 | WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => { | 47 | WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => { |
47 | if token.kind() == WHITESPACE || token.kind() == COMMENT { | 48 | if prev_token_kind == WHITESPACE || prev_token_kind == COMMENT { |
48 | // removing a new line may extends previous token | 49 | // removing a new line may extends previous token |
49 | if token.text().to_string()[edit.delete - token.text_range().start()].contains('\n') | 50 | let deleted_range = edit.delete - prev_token.text_range().start(); |
50 | { | 51 | if prev_token.text()[deleted_range].contains('\n') { |
51 | return None; | 52 | return None; |
52 | } | 53 | } |
53 | } | 54 | } |
54 | 55 | ||
55 | let text = get_text_after_edit(token.clone().into(), &edit); | 56 | let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit); |
56 | let lex_tokens = tokenize(&text); | 57 | let (new_token_kind, _error) = lex_single_syntax_kind(&new_text)?; |
57 | let lex_token = match lex_tokens[..] { | ||
58 | [lex_token] if lex_token.kind == token.kind() => lex_token, | ||
59 | _ => return None, | ||
60 | }; | ||
61 | 58 | ||
62 | if lex_token.kind == IDENT && is_contextual_kw(&text) { | 59 | if new_token_kind != prev_token_kind |
60 | || (new_token_kind == IDENT && is_contextual_kw(&new_text)) | ||
61 | { | ||
63 | return None; | 62 | return None; |
64 | } | 63 | } |
65 | 64 | ||
66 | if let Some(next_char) = root.text().char_at(token.text_range().end()) { | 65 | // Check that the edited token is not part of a bigger token. |
67 | let tokens_with_next_char = tokenize(&format!("{}{}", text, next_char)); | 66 | // E.g. if for the source code `bruh"str"` the user removed `ruh`, then |
68 | if tokens_with_next_char.len() == 1 { | 67 | // `b` no longer remains an identifier but becomes part of a byte string literal |
68 | if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) { | ||
69 | new_text.push(next_char); | ||
70 | let token_with_next_char = lex_single_syntax_kind(&new_text); | ||
71 | if let Some((_kind, _error)) = token_with_next_char { | ||
69 | return None; | 72 | return None; |
70 | } | 73 | } |
74 | new_text.pop(); | ||
71 | } | 75 | } |
72 | 76 | ||
73 | let new_token = GreenToken::new(rowan::SyntaxKind(token.kind().into()), text.into()); | 77 | let new_token = |
74 | Some((token.replace_with(new_token), token.text_range())) | 78 | GreenToken::new(rowan::SyntaxKind(prev_token_kind.into()), new_text.into()); |
79 | Some((prev_token.replace_with(new_token), prev_token.text_range())) | ||
75 | } | 80 | } |
76 | _ => None, | 81 | _ => None, |
77 | } | 82 | } |
@@ -83,20 +88,26 @@ fn reparse_block<'node>( | |||
83 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { | 88 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { |
84 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; | 89 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; |
85 | let text = get_text_after_edit(node.clone().into(), &edit); | 90 | let text = get_text_after_edit(node.clone().into(), &edit); |
86 | let tokens = tokenize(&text); | 91 | |
92 | let (tokens, new_lexer_errors) = tokenize(&text); | ||
87 | if !is_balanced(&tokens) { | 93 | if !is_balanced(&tokens) { |
88 | return None; | 94 | return None; |
89 | } | 95 | } |
96 | |||
90 | let mut token_source = TextTokenSource::new(&text, &tokens); | 97 | let mut token_source = TextTokenSource::new(&text, &tokens); |
91 | let mut tree_sink = TextTreeSink::new(&text, &tokens); | 98 | let mut tree_sink = TextTreeSink::new(&text, &tokens); |
92 | reparser.parse(&mut token_source, &mut tree_sink); | 99 | reparser.parse(&mut token_source, &mut tree_sink); |
93 | let (green, new_errors) = tree_sink.finish(); | 100 | |
94 | Some((node.replace_with(green), new_errors, node.text_range())) | 101 | let (green, mut new_parser_errors) = tree_sink.finish(); |
102 | new_parser_errors.extend(new_lexer_errors); | ||
103 | |||
104 | Some((node.replace_with(green), new_parser_errors, node.text_range())) | ||
95 | } | 105 | } |
96 | 106 | ||
97 | fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String { | 107 | fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String { |
98 | let edit = | 108 | let edit = |
99 | AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone()); | 109 | AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone()); |
110 | |||
100 | let text = match element { | 111 | let text = match element { |
101 | NodeOrToken::Token(token) => token.text().to_string(), | 112 | NodeOrToken::Token(token) => token.text().to_string(), |
102 | NodeOrToken::Node(node) => node.text().to_string(), | 113 | NodeOrToken::Node(node) => node.text().to_string(), |
@@ -113,6 +124,7 @@ fn is_contextual_kw(text: &str) -> bool { | |||
113 | 124 | ||
114 | fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> { | 125 | fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> { |
115 | let node = algo::find_covering_element(node, range); | 126 | let node = algo::find_covering_element(node, range); |
127 | |||
116 | let mut ancestors = match node { | 128 | let mut ancestors = match node { |
117 | NodeOrToken::Token(it) => it.parent().ancestors(), | 129 | NodeOrToken::Token(it) => it.parent().ancestors(), |
118 | NodeOrToken::Node(it) => it.ancestors(), | 130 | NodeOrToken::Node(it) => it.ancestors(), |
@@ -182,7 +194,6 @@ mod tests { | |||
182 | let fully_reparsed = SourceFile::parse(&after); | 194 | let fully_reparsed = SourceFile::parse(&after); |
183 | let incrementally_reparsed: Parse<SourceFile> = { | 195 | let incrementally_reparsed: Parse<SourceFile> = { |
184 | let f = SourceFile::parse(&before); | 196 | let f = SourceFile::parse(&before); |
185 | let edit = AtomTextEdit { delete: range, insert: replace_with.to_string() }; | ||
186 | let (green, new_errors, range) = | 197 | let (green, new_errors, range) = |
187 | incremental_reparse(f.tree().syntax(), &edit, f.errors.to_vec()).unwrap(); | 198 | incremental_reparse(f.tree().syntax(), &edit, f.errors.to_vec()).unwrap(); |
188 | assert_eq!(range.len(), reparsed_len.into(), "reparsed fragment has wrong length"); | 199 | assert_eq!(range.len(), reparsed_len.into(), "reparsed fragment has wrong length"); |
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs
index c36756d6c..dd202601d 100644
--- a/crates/ra_syntax/src/parsing/text_tree_sink.rs
+++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs
@@ -92,8 +92,8 @@ impl<'a> TreeSink for TextTreeSink<'a> { | |||
92 | } | 92 | } |
93 | 93 | ||
94 | impl<'a> TextTreeSink<'a> { | 94 | impl<'a> TextTreeSink<'a> { |
95 | pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> TextTreeSink<'a> { | 95 | pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> Self { |
96 | TextTreeSink { | 96 | Self { |
97 | text, | 97 | text, |
98 | tokens, | 98 | tokens, |
99 | text_pos: 0.into(), | 99 | text_pos: 0.into(), |
diff --git a/crates/ra_syntax/src/syntax_error.rs b/crates/ra_syntax/src/syntax_error.rs
index 6c171df8d..7f9d36618 100644
--- a/crates/ra_syntax/src/syntax_error.rs
+++ b/crates/ra_syntax/src/syntax_error.rs
@@ -4,7 +4,7 @@ use std::fmt; | |||
4 | 4 | ||
5 | use ra_parser::ParseError; | 5 | use ra_parser::ParseError; |
6 | 6 | ||
7 | use crate::{validation::EscapeError, TextRange, TextUnit}; | 7 | use crate::{validation::EscapeError, TextRange, TextUnit, TokenizeError}; |
8 | 8 | ||
9 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] | 9 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] |
10 | pub struct SyntaxError { | 10 | pub struct SyntaxError { |
@@ -12,6 +12,10 @@ pub struct SyntaxError { | |||
12 | location: Location, | 12 | location: Location, |
13 | } | 13 | } |
14 | 14 | ||
15 | // FIXME: Location should be just `Location(TextRange)` | ||
16 | // TextUnit enum member just unnecessarily complicates things, | ||
17 | // we shouldn't treat it specially; it is just a `TextRange { start: x, end: x + 1 }` | ||
18 | // see `location_to_range()` in ra_ide/src/diagnostics | ||
15 | #[derive(Clone, PartialEq, Eq, Hash)] | 19 | #[derive(Clone, PartialEq, Eq, Hash)] |
16 | pub enum Location { | 20 | pub enum Location { |
17 | Offset(TextUnit), | 21 | Offset(TextUnit), |
@@ -67,6 +71,10 @@ impl SyntaxError { | |||
67 | 71 | ||
68 | self | 72 | self |
69 | } | 73 | } |
74 | |||
75 | pub fn debug_dump(&self, acc: &mut impl fmt::Write) { | ||
76 | writeln!(acc, "error {:?}: {}", self.location(), self.kind()).unwrap(); | ||
77 | } | ||
70 | } | 78 | } |
71 | 79 | ||
72 | impl fmt::Display for SyntaxError { | 80 | impl fmt::Display for SyntaxError { |
@@ -79,6 +87,10 @@ impl fmt::Display for SyntaxError { | |||
79 | pub enum SyntaxErrorKind { | 87 | pub enum SyntaxErrorKind { |
80 | ParseError(ParseError), | 88 | ParseError(ParseError), |
81 | EscapeError(EscapeError), | 89 | EscapeError(EscapeError), |
90 | TokenizeError(TokenizeError), | ||
91 | // FIXME: the obvious pattern of this enum dictates that the following enum variants | ||
92 | // should be wrapped into something like `SemanticError(SemanticError)` | ||
93 | // or `ValidateError(ValidateError)` or `SemanticValidateError(...)` | ||
82 | InvalidBlockAttr, | 94 | InvalidBlockAttr, |
83 | InvalidMatchInnerAttr, | 95 | InvalidMatchInnerAttr, |
84 | InvalidTupleIndexFormat, | 96 | InvalidTupleIndexFormat, |
@@ -101,6 +113,7 @@ impl fmt::Display for SyntaxErrorKind { | |||
101 | } | 113 | } |
102 | ParseError(msg) => write!(f, "{}", msg.0), | 114 | ParseError(msg) => write!(f, "{}", msg.0), |
103 | EscapeError(err) => write!(f, "{}", err), | 115 | EscapeError(err) => write!(f, "{}", err), |
116 | TokenizeError(err) => write!(f, "{}", err), | ||
104 | VisibilityNotAllowed => { | 117 | VisibilityNotAllowed => { |
105 | write!(f, "unnecessary visibility qualifier") | 118 | write!(f, "unnecessary visibility qualifier") |
106 | } | 119 | } |
@@ -111,6 +124,51 @@ impl fmt::Display for SyntaxErrorKind { | |||
111 | } | 124 | } |
112 | } | 125 | } |
113 | 126 | ||
127 | impl fmt::Display for TokenizeError { | ||
128 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||
129 | #[rustfmt::skip] | ||
130 | let msg = match self { | ||
131 | TokenizeError::EmptyInt => { | ||
132 | "Missing digits after the integer base prefix" | ||
133 | } | ||
134 | TokenizeError::EmptyExponent => { | ||
135 | "Missing digits after the exponent symbol" | ||
136 | } | ||
137 | TokenizeError::UnterminatedBlockComment => { | ||
138 | "Missing trailing `*/` symbols to terminate the block comment" | ||
139 | } | ||
140 | TokenizeError::UnterminatedChar => { | ||
141 | "Missing trailing `'` symbol to terminate the character literal" | ||
142 | } | ||
143 | TokenizeError::UnterminatedByte => { | ||
144 | "Missing trailing `'` symbol to terminate the byte literal" | ||
145 | } | ||
146 | TokenizeError::UnterminatedString => { | ||
147 | "Missing trailing `\"` symbol to terminate the string literal" | ||
148 | } | ||
149 | TokenizeError::UnterminatedByteString => { | ||
150 | "Missing trailing `\"` symbol to terminate the byte string literal" | ||
151 | } | ||
152 | TokenizeError::UnterminatedRawString => { | ||
153 | "Missing trailing `\"` with `#` symbols to terminate the raw string literal" | ||
154 | } | ||
155 | TokenizeError::UnterminatedRawByteString => { | ||
156 | "Missing trailing `\"` with `#` symbols to terminate the raw byte string literal" | ||
157 | } | ||
158 | TokenizeError::UnstartedRawString => { | ||
159 | "Missing `\"` symbol after `#` symbols to begin the raw string literal" | ||
160 | } | ||
161 | TokenizeError::UnstartedRawByteString => { | ||
162 | "Missing `\"` symbol after `#` symbols to begin the raw byte string literal" | ||
163 | } | ||
164 | TokenizeError::LifetimeStartsWithNumber => { | ||
165 | "Lifetime name cannot start with a number" | ||
166 | } | ||
167 | }; | ||
168 | write!(f, "{}", msg) | ||
169 | } | ||
170 | } | ||
171 | |||
114 | impl fmt::Display for EscapeError { | 172 | impl fmt::Display for EscapeError { |
115 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | 173 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
116 | let msg = match self { | 174 | let msg = match self { |
diff --git a/crates/ra_syntax/src/syntax_node.rs b/crates/ra_syntax/src/syntax_node.rs
index b3eb5da63..7c2b18af3 100644
--- a/crates/ra_syntax/src/syntax_node.rs
+++ b/crates/ra_syntax/src/syntax_node.rs
@@ -4,7 +4,7 @@ | |||
4 | //! `SyntaxNode`, and a basic traversal API (parent, children, siblings). | 4 | //! `SyntaxNode`, and a basic traversal API (parent, children, siblings). |
5 | //! | 5 | //! |
6 | //! The *real* implementation is in the (language-agnostic) `rowan` crate, this | 6 | //! The *real* implementation is in the (language-agnostic) `rowan` crate, this |
7 | //! modules just wraps its API. | 7 | //! module just wraps its API. |
8 | 8 | ||
9 | use ra_parser::ParseError; | 9 | use ra_parser::ParseError; |
10 | use rowan::{GreenNodeBuilder, Language}; | 10 | use rowan::{GreenNodeBuilder, Language}; |
@@ -38,17 +38,12 @@ pub type SyntaxElementChildren = rowan::SyntaxElementChildren<RustLanguage>; | |||
38 | 38 | ||
39 | pub use rowan::{Direction, NodeOrToken}; | 39 | pub use rowan::{Direction, NodeOrToken}; |
40 | 40 | ||
41 | #[derive(Default)] | ||
41 | pub struct SyntaxTreeBuilder { | 42 | pub struct SyntaxTreeBuilder { |
42 | errors: Vec<SyntaxError>, | 43 | errors: Vec<SyntaxError>, |
43 | inner: GreenNodeBuilder<'static>, | 44 | inner: GreenNodeBuilder<'static>, |
44 | } | 45 | } |
45 | 46 | ||
46 | impl Default for SyntaxTreeBuilder { | ||
47 | fn default() -> SyntaxTreeBuilder { | ||
48 | SyntaxTreeBuilder { errors: Vec::new(), inner: GreenNodeBuilder::new() } | ||
49 | } | ||
50 | } | ||
51 | |||
52 | impl SyntaxTreeBuilder { | 47 | impl SyntaxTreeBuilder { |
53 | pub(crate) fn finish_raw(self) -> (GreenNode, Vec<SyntaxError>) { | 48 | pub(crate) fn finish_raw(self) -> (GreenNode, Vec<SyntaxError>) { |
54 | let green = self.inner.finish(); | 49 | let green = self.inner.finish(); |
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 458920607..fb22b9e54 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -1,18 +1,28 @@ | |||
1 | use std::{ | 1 | use std::{ |
2 | fmt::Write, | 2 | fmt::Write, |
3 | path::{Component, PathBuf}, | 3 | path::{Component, Path, PathBuf}, |
4 | }; | 4 | }; |
5 | 5 | ||
6 | use test_utils::{collect_tests, dir_tests, project_dir, read_text}; | 6 | use test_utils::{collect_tests, dir_tests, project_dir, read_text}; |
7 | 7 | ||
8 | use crate::{fuzz, SourceFile}; | 8 | use crate::{fuzz, tokenize, Location, SourceFile, SyntaxError, TextRange, Token}; |
9 | 9 | ||
10 | #[test] | 10 | #[test] |
11 | fn lexer_tests() { | 11 | fn lexer_tests() { |
12 | dir_tests(&test_data_dir(), &["lexer"], |text, _| { | 12 | // FIXME: |
13 | let tokens = crate::tokenize(text); | 13 | // * Add tests for unicode escapes in byte-character and [raw]-byte-string literals |
14 | dump_tokens(&tokens, text) | 14 | // * Add tests for unescape errors |
15 | }) | 15 | |
16 | dir_tests(&test_data_dir(), &["lexer/ok"], |text, path| { | ||
17 | let (tokens, errors) = tokenize(text); | ||
18 | assert_errors_are_absent(&errors, path); | ||
19 | dump_tokens_and_errors(&tokens, &errors, text) | ||
20 | }); | ||
21 | dir_tests(&test_data_dir(), &["lexer/err"], |text, path| { | ||
22 | let (tokens, errors) = tokenize(text); | ||
23 | assert_errors_are_present(&errors, path); | ||
24 | dump_tokens_and_errors(&tokens, &errors, text) | ||
25 | }); | ||
16 | } | 26 | } |
17 | 27 | ||
18 | #[test] | 28 | #[test] |
@@ -32,18 +42,13 @@ fn parser_tests() { | |||
32 | dir_tests(&test_data_dir(), &["parser/inline/ok", "parser/ok"], |text, path| { | 42 | dir_tests(&test_data_dir(), &["parser/inline/ok", "parser/ok"], |text, path| { |
33 | let parse = SourceFile::parse(text); | 43 | let parse = SourceFile::parse(text); |
34 | let errors = parse.errors(); | 44 | let errors = parse.errors(); |
35 | assert_eq!( | 45 | assert_errors_are_absent(&errors, path); |
36 | errors, | ||
37 | &[] as &[crate::SyntaxError], | ||
38 | "There should be no errors in the file {:?}", | ||
39 | path.display(), | ||
40 | ); | ||
41 | parse.debug_dump() | 46 | parse.debug_dump() |
42 | }); | 47 | }); |
43 | dir_tests(&test_data_dir(), &["parser/err", "parser/inline/err"], |text, path| { | 48 | dir_tests(&test_data_dir(), &["parser/err", "parser/inline/err"], |text, path| { |
44 | let parse = SourceFile::parse(text); | 49 | let parse = SourceFile::parse(text); |
45 | let errors = parse.errors(); | 50 | let errors = parse.errors(); |
46 | assert!(!errors.is_empty(), "There should be errors in the file {:?}", path.display()); | 51 | assert_errors_are_present(&errors, path); |
47 | parse.debug_dump() | 52 | parse.debug_dump() |
48 | }); | 53 | }); |
49 | } | 54 | } |
@@ -75,7 +80,7 @@ fn self_hosting_parsing() { | |||
75 | .into_iter() | 80 | .into_iter() |
76 | .filter_entry(|entry| { | 81 | .filter_entry(|entry| { |
77 | !entry.path().components().any(|component| { | 82 | !entry.path().components().any(|component| { |
78 | // Get all files which are not in the crates/ra_syntax/tests/data folder | 83 | // Get all files which are not in the crates/ra_syntax/test_data folder |
79 | component == Component::Normal(OsStr::new("test_data")) | 84 | component == Component::Normal(OsStr::new("test_data")) |
80 | }) | 85 | }) |
81 | }) | 86 | }) |
@@ -101,15 +106,47 @@ fn test_data_dir() -> PathBuf { | |||
101 | project_dir().join("crates/ra_syntax/test_data") | 106 | project_dir().join("crates/ra_syntax/test_data") |
102 | } | 107 | } |
103 | 108 | ||
104 | fn dump_tokens(tokens: &[crate::Token], text: &str) -> String { | 109 | fn assert_errors_are_present(errors: &[SyntaxError], path: &Path) { |
110 | assert!(!errors.is_empty(), "There should be errors in the file {:?}", path.display()); | ||
111 | } | ||
112 | fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) { | ||
113 | assert_eq!( | ||
114 | errors, | ||
115 | &[] as &[SyntaxError], | ||
116 | "There should be no errors in the file {:?}", | ||
117 | path.display(), | ||
118 | ); | ||
119 | } | ||
120 | |||
121 | fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String { | ||
105 | let mut acc = String::new(); | 122 | let mut acc = String::new(); |
106 | let mut offset = 0; | 123 | let mut offset = 0; |
107 | for token in tokens { | 124 | for token in tokens { |
108 | let len: u32 = token.len.into(); | 125 | let token_len = token.len.to_usize(); |
109 | let len = len as usize; | 126 | let token_text = &text[offset..offset + token_len]; |
110 | let token_text = &text[offset..offset + len]; | 127 | offset += token_len; |
111 | offset += len; | 128 | writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap(); |
112 | write!(acc, "{:?} {} {:?}\n", token.kind, token.len, token_text).unwrap() | 129 | } |
130 | for err in errors { | ||
131 | let err_range = location_to_range(err.location()); | ||
132 | writeln!( | ||
133 | acc, | ||
134 | "> error{:?} token({:?}) msg({})", | ||
135 | err.location(), | ||
136 | &text[err_range], | ||
137 | err.kind() | ||
138 | ) | ||
139 | .unwrap(); | ||
140 | } | ||
141 | return acc; | ||
142 | |||
143 | // FIXME: copy-pasted this from `ra_ide/src/diagnostics.rs` | ||
144 | // `Location` will be refactored soon in new PR, see todos here: | ||
145 | // https://github.com/rust-analyzer/rust-analyzer/issues/223 | ||
146 | fn location_to_range(location: Location) -> TextRange { | ||
147 | match location { | ||
148 | Location::Offset(offset) => TextRange::offset_len(offset, 1.into()), | ||
149 | Location::Range(range) => range, | ||
150 | } | ||
113 | } | 151 | } |
114 | acc | ||
115 | } | 152 | } |
diff --git a/crates/ra_syntax/src/validation.rs b/crates/ra_syntax/src/validation.rs
index 445e3b3e4..8a5f0e4b7 100644
--- a/crates/ra_syntax/src/validation.rs
+++ b/crates/ra_syntax/src/validation.rs
@@ -94,6 +94,12 @@ impl From<rustc_lexer::unescape::EscapeError> for SyntaxErrorKind { | |||
94 | } | 94 | } |
95 | 95 | ||
96 | pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> { | 96 | pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> { |
97 | // FIXME: | ||
98 | // * Add validation of character literal containing only a single char | ||
99 | // * Add validation of `crate` keyword not appearing in the middle of the symbol path | ||
100 | // * Add validation that doc comments are attached to nodes | ||
101 | // * Remove validation of unterminated literals (it is already implemented in `tokenize()`) | ||
102 | |||
97 | let mut errors = Vec::new(); | 103 | let mut errors = Vec::new(); |
98 | for node in root.descendants() { | 104 | for node in root.descendants() { |
99 | match_ast! { | 105 | match_ast! { |