diff options
author | Veetaha <[email protected]> | 2020-02-06 00:33:18 +0000 |
---|---|---|
committer | Veetaha <[email protected]> | 2020-02-17 20:24:33 +0000 |
commit | 9fdf984958901a6bf16772c2c88b3163f044b390 (patch) | |
tree | b3f86c533c6a9a86a9183cb117e23efd382c045e /crates/ra_syntax/src | |
parent | 9053003e3b298b38f6029b860efc5baed1996385 (diff) |
ra_syntax: reshape SyntaxError for the sake of removing redundancy
Diffstat (limited to 'crates/ra_syntax/src')
-rw-r--r-- | crates/ra_syntax/src/lib.rs | 8 | ||||
-rw-r--r-- | crates/ra_syntax/src/parsing/lexer.rs | 92 | ||||
-rw-r--r-- | crates/ra_syntax/src/parsing/reparsing.rs | 27 | ||||
-rw-r--r-- | crates/ra_syntax/src/syntax_error.rs | 214 | ||||
-rw-r--r-- | crates/ra_syntax/src/syntax_node.rs | 14 | ||||
-rw-r--r-- | crates/ra_syntax/src/tests.rs | 21 | ||||
-rw-r--r-- | crates/ra_syntax/src/validation.rs | 155 | ||||
-rw-r--r-- | crates/ra_syntax/src/validation/block.rs | 14 |
8 files changed, 159 insertions, 386 deletions
diff --git a/crates/ra_syntax/src/lib.rs b/crates/ra_syntax/src/lib.rs index f8f4b64c1..cc02c4be5 100644 --- a/crates/ra_syntax/src/lib.rs +++ b/crates/ra_syntax/src/lib.rs | |||
@@ -41,11 +41,9 @@ use crate::syntax_node::GreenNode; | |||
41 | pub use crate::{ | 41 | pub use crate::{ |
42 | algo::InsertPosition, | 42 | algo::InsertPosition, |
43 | ast::{AstNode, AstToken}, | 43 | ast::{AstNode, AstToken}, |
44 | parsing::{ | 44 | parsing::{lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, Token}, |
45 | lex_single_syntax_kind, lex_single_valid_syntax_kind, tokenize, Token, TokenizeError, | ||
46 | }, | ||
47 | ptr::{AstPtr, SyntaxNodePtr}, | 45 | ptr::{AstPtr, SyntaxNodePtr}, |
48 | syntax_error::{Location, SyntaxError, SyntaxErrorKind}, | 46 | syntax_error::SyntaxError, |
49 | syntax_node::{ | 47 | syntax_node::{ |
50 | Direction, NodeOrToken, SyntaxElement, SyntaxNode, SyntaxToken, SyntaxTreeBuilder, | 48 | Direction, NodeOrToken, SyntaxElement, SyntaxNode, SyntaxToken, SyntaxTreeBuilder, |
51 | }, | 49 | }, |
@@ -117,7 +115,7 @@ impl Parse<SourceFile> { | |||
117 | pub fn debug_dump(&self) -> String { | 115 | pub fn debug_dump(&self) -> String { |
118 | let mut buf = format!("{:#?}", self.tree().syntax()); | 116 | let mut buf = format!("{:#?}", self.tree().syntax()); |
119 | for err in self.errors.iter() { | 117 | for err in self.errors.iter() { |
120 | writeln!(buf, "error {:?}: {}", err.location(), err.kind()).unwrap(); | 118 | writeln!(buf, "error {:?}: {}", err.range(), err.message()).unwrap(); |
121 | } | 119 | } |
122 | buf | 120 | buf |
123 | } | 121 | } |
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs index f889e6a1d..f2684c852 100644 --- a/crates/ra_syntax/src/parsing/lexer.rs +++ b/crates/ra_syntax/src/parsing/lexer.rs | |||
@@ -2,7 +2,7 @@ | |||
2 | //! It is just a bridge to `rustc_lexer`. | 2 | //! It is just a bridge to `rustc_lexer`. |
3 | 3 | ||
4 | use crate::{ | 4 | use crate::{ |
5 | SyntaxError, SyntaxErrorKind, | 5 | SyntaxError, |
6 | SyntaxKind::{self, *}, | 6 | SyntaxKind::{self, *}, |
7 | TextRange, TextUnit, | 7 | TextRange, TextUnit, |
8 | }; | 8 | }; |
@@ -41,13 +41,13 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) { | |||
41 | let token_len = TextUnit::from_usize(rustc_token.len); | 41 | let token_len = TextUnit::from_usize(rustc_token.len); |
42 | let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len); | 42 | let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len); |
43 | 43 | ||
44 | let (syntax_kind, error) = | 44 | let (syntax_kind, err_message) = |
45 | rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]); | 45 | rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]); |
46 | 46 | ||
47 | tokens.push(Token { kind: syntax_kind, len: token_len }); | 47 | tokens.push(Token { kind: syntax_kind, len: token_len }); |
48 | 48 | ||
49 | if let Some(error) = error { | 49 | if let Some(err_message) = err_message { |
50 | errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)); | 50 | errors.push(SyntaxError::new(err_message, token_range)); |
51 | } | 51 | } |
52 | 52 | ||
53 | offset += rustc_token.len; | 53 | offset += rustc_token.len; |
@@ -94,61 +94,21 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> { | |||
94 | } | 94 | } |
95 | 95 | ||
96 | let rustc_token = rustc_lexer::first_token(text); | 96 | let rustc_token = rustc_lexer::first_token(text); |
97 | let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text); | 97 | let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text); |
98 | 98 | ||
99 | let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) }; | 99 | let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) }; |
100 | let error = error.map(|error| { | 100 | let optional_error = err_message.map(|err_message| { |
101 | SyntaxError::new( | 101 | SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text))) |
102 | SyntaxErrorKind::TokenizeError(error), | ||
103 | TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)), | ||
104 | ) | ||
105 | }); | 102 | }); |
106 | 103 | ||
107 | Some((token, error)) | 104 | Some((token, optional_error)) |
108 | } | ||
109 | |||
110 | // FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad advice: | ||
111 | // https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067 | ||
112 | |||
113 | /// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant. | ||
114 | /// It describes all the types of errors that may happen during the tokenization | ||
115 | /// of Rust source. | ||
116 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] | ||
117 | pub enum TokenizeError { | ||
118 | /// Base prefix was provided, but there were no digits | ||
119 | /// after it, e.g. `0x`, `0b`. | ||
120 | EmptyInt, | ||
121 | /// Float exponent lacks digits e.g. `12.34e+`, `12.3E+`, `12e-`, `1_E-`, | ||
122 | EmptyExponent, | ||
123 | |||
124 | /// Block comment lacks trailing delimiter `*/` | ||
125 | UnterminatedBlockComment, | ||
126 | /// Character literal lacks trailing delimiter `'` | ||
127 | UnterminatedChar, | ||
128 | /// Characterish byte literal lacks trailing delimiter `'` | ||
129 | UnterminatedByte, | ||
130 | /// String literal lacks trailing delimiter `"` | ||
131 | UnterminatedString, | ||
132 | /// Byte string literal lacks trailing delimiter `"` | ||
133 | UnterminatedByteString, | ||
134 | /// Raw literal lacks trailing delimiter e.g. `"##` | ||
135 | UnterminatedRawString, | ||
136 | /// Raw byte string literal lacks trailing delimiter e.g. `"##` | ||
137 | UnterminatedRawByteString, | ||
138 | |||
139 | /// Raw string lacks a quote after the pound characters e.g. `r###` | ||
140 | UnstartedRawString, | ||
141 | /// Raw byte string lacks a quote after the pound characters e.g. `br###` | ||
142 | UnstartedRawByteString, | ||
143 | |||
144 | /// Lifetime starts with a number e.g. `'4ever` | ||
145 | LifetimeStartsWithNumber, | ||
146 | } | 105 | } |
147 | 106 | ||
107 | /// Returns `SyntaxKind` and an optional tokenize error message. | ||
148 | fn rustc_token_kind_to_syntax_kind( | 108 | fn rustc_token_kind_to_syntax_kind( |
149 | rustc_token_kind: &rustc_lexer::TokenKind, | 109 | rustc_token_kind: &rustc_lexer::TokenKind, |
150 | token_text: &str, | 110 | token_text: &str, |
151 | ) -> (SyntaxKind, Option<TokenizeError>) { | 111 | ) -> (SyntaxKind, Option<&'static str>) { |
152 | // A note on an intended tradeoff: | 112 | // A note on an intended tradeoff: |
153 | // We drop some useful information here (see patterns with double dots `..`) | 113 | // We drop some useful information here (see patterns with double dots `..`) |
154 | // Storing that info in `SyntaxKind` is not possible due to its layout requirements of | 114 | // Storing that info in `SyntaxKind` is not possible due to its layout requirements of |
@@ -156,14 +116,15 @@ fn rustc_token_kind_to_syntax_kind( | |||
156 | 116 | ||
157 | let syntax_kind = { | 117 | let syntax_kind = { |
158 | use rustc_lexer::TokenKind as TK; | 118 | use rustc_lexer::TokenKind as TK; |
159 | use TokenizeError as TE; | ||
160 | |||
161 | match rustc_token_kind { | 119 | match rustc_token_kind { |
162 | TK::LineComment => COMMENT, | 120 | TK::LineComment => COMMENT, |
163 | 121 | ||
164 | TK::BlockComment { terminated: true } => COMMENT, | 122 | TK::BlockComment { terminated: true } => COMMENT, |
165 | TK::BlockComment { terminated: false } => { | 123 | TK::BlockComment { terminated: false } => { |
166 | return (COMMENT, Some(TE::UnterminatedBlockComment)); | 124 | return ( |
125 | COMMENT, | ||
126 | Some("Missing trailing `*/` symbols to terminate the block comment"), | ||
127 | ); | ||
167 | } | 128 | } |
168 | 129 | ||
169 | TK::Whitespace => WHITESPACE, | 130 | TK::Whitespace => WHITESPACE, |
@@ -181,7 +142,7 @@ fn rustc_token_kind_to_syntax_kind( | |||
181 | 142 | ||
182 | TK::Lifetime { starts_with_number: false } => LIFETIME, | 143 | TK::Lifetime { starts_with_number: false } => LIFETIME, |
183 | TK::Lifetime { starts_with_number: true } => { | 144 | TK::Lifetime { starts_with_number: true } => { |
184 | return (LIFETIME, Some(TE::LifetimeStartsWithNumber)) | 145 | return (LIFETIME, Some("Lifetime name cannot start with a number")) |
185 | } | 146 | } |
186 | 147 | ||
187 | TK::Semi => SEMI, | 148 | TK::Semi => SEMI, |
@@ -217,57 +178,56 @@ fn rustc_token_kind_to_syntax_kind( | |||
217 | 178 | ||
218 | return (syntax_kind, None); | 179 | return (syntax_kind, None); |
219 | 180 | ||
220 | fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) { | 181 | fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<&'static str>) { |
221 | use rustc_lexer::LiteralKind as LK; | 182 | use rustc_lexer::LiteralKind as LK; |
222 | use TokenizeError as TE; | ||
223 | 183 | ||
224 | #[rustfmt::skip] | 184 | #[rustfmt::skip] |
225 | let syntax_kind = match *kind { | 185 | let syntax_kind = match *kind { |
226 | LK::Int { empty_int: false, .. } => INT_NUMBER, | 186 | LK::Int { empty_int: false, .. } => INT_NUMBER, |
227 | LK::Int { empty_int: true, .. } => { | 187 | LK::Int { empty_int: true, .. } => { |
228 | return (INT_NUMBER, Some(TE::EmptyInt)) | 188 | return (INT_NUMBER, Some("Missing digits after the integer base prefix")) |
229 | } | 189 | } |
230 | 190 | ||
231 | LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER, | 191 | LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER, |
232 | LK::Float { empty_exponent: true, .. } => { | 192 | LK::Float { empty_exponent: true, .. } => { |
233 | return (FLOAT_NUMBER, Some(TE::EmptyExponent)) | 193 | return (FLOAT_NUMBER, Some("Missing digits after the exponent symbol")) |
234 | } | 194 | } |
235 | 195 | ||
236 | LK::Char { terminated: true } => CHAR, | 196 | LK::Char { terminated: true } => CHAR, |
237 | LK::Char { terminated: false } => { | 197 | LK::Char { terminated: false } => { |
238 | return (CHAR, Some(TE::UnterminatedChar)) | 198 | return (CHAR, Some("Missing trailing `'` symbol to terminate the character literal")) |
239 | } | 199 | } |
240 | 200 | ||
241 | LK::Byte { terminated: true } => BYTE, | 201 | LK::Byte { terminated: true } => BYTE, |
242 | LK::Byte { terminated: false } => { | 202 | LK::Byte { terminated: false } => { |
243 | return (BYTE, Some(TE::UnterminatedByte)) | 203 | return (BYTE, Some("Missing trailing `'` symbol to terminate the byte literal")) |
244 | } | 204 | } |
245 | 205 | ||
246 | LK::Str { terminated: true } => STRING, | 206 | LK::Str { terminated: true } => STRING, |
247 | LK::Str { terminated: false } => { | 207 | LK::Str { terminated: false } => { |
248 | return (STRING, Some(TE::UnterminatedString)) | 208 | return (STRING, Some("Missing trailing `\"` symbol to terminate the string literal")) |
249 | } | 209 | } |
250 | 210 | ||
251 | 211 | ||
252 | LK::ByteStr { terminated: true } => BYTE_STRING, | 212 | LK::ByteStr { terminated: true } => BYTE_STRING, |
253 | LK::ByteStr { terminated: false } => { | 213 | LK::ByteStr { terminated: false } => { |
254 | return (BYTE_STRING, Some(TE::UnterminatedByteString)) | 214 | return (BYTE_STRING, Some("Missing trailing `\"` symbol to terminate the byte string literal")) |
255 | } | 215 | } |
256 | 216 | ||
257 | LK::RawStr { started: true, terminated: true, .. } => RAW_STRING, | 217 | LK::RawStr { started: true, terminated: true, .. } => RAW_STRING, |
258 | LK::RawStr { started: true, terminated: false, .. } => { | 218 | LK::RawStr { started: true, terminated: false, .. } => { |
259 | return (RAW_STRING, Some(TE::UnterminatedRawString)) | 219 | return (RAW_STRING, Some("Missing trailing `\"` with `#` symbols to terminate the raw string literal")) |
260 | } | 220 | } |
261 | LK::RawStr { started: false, .. } => { | 221 | LK::RawStr { started: false, .. } => { |
262 | return (RAW_STRING, Some(TE::UnstartedRawString)) | 222 | return (RAW_STRING, Some("Missing `\"` symbol after `#` symbols to begin the raw string literal")) |
263 | } | 223 | } |
264 | 224 | ||
265 | LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING, | 225 | LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING, |
266 | LK::RawByteStr { started: true, terminated: false, .. } => { | 226 | LK::RawByteStr { started: true, terminated: false, .. } => { |
267 | return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString)) | 227 | return (RAW_BYTE_STRING, Some("Missing trailing `\"` with `#` symbols to terminate the raw byte string literal")) |
268 | } | 228 | } |
269 | LK::RawByteStr { started: false, .. } => { | 229 | LK::RawByteStr { started: false, .. } => { |
270 | return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString)) | 230 | return (RAW_BYTE_STRING, Some("Missing `\"` symbol after `#` symbols to begin the raw byte string literal")) |
271 | } | 231 | } |
272 | }; | 232 | }; |
273 | 233 | ||
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs index a86da0675..41a355ec7 100644 --- a/crates/ra_syntax/src/parsing/reparsing.rs +++ b/crates/ra_syntax/src/parsing/reparsing.rs | |||
@@ -87,7 +87,7 @@ fn reparse_block<'node>( | |||
87 | edit: &AtomTextEdit, | 87 | edit: &AtomTextEdit, |
88 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { | 88 | ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> { |
89 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; | 89 | let (node, reparser) = find_reparsable_node(root, edit.delete)?; |
90 | let text = get_text_after_edit(node.clone().into(), &edit); | 90 | let text = get_text_after_edit(node.clone().into(), edit); |
91 | 91 | ||
92 | let (tokens, new_lexer_errors) = tokenize(&text); | 92 | let (tokens, new_lexer_errors) = tokenize(&text); |
93 | if !is_balanced(&tokens) { | 93 | if !is_balanced(&tokens) { |
@@ -162,20 +162,27 @@ fn is_balanced(tokens: &[Token]) -> bool { | |||
162 | fn merge_errors( | 162 | fn merge_errors( |
163 | old_errors: Vec<SyntaxError>, | 163 | old_errors: Vec<SyntaxError>, |
164 | new_errors: Vec<SyntaxError>, | 164 | new_errors: Vec<SyntaxError>, |
165 | old_range: TextRange, | 165 | range_before_reparse: TextRange, |
166 | edit: &AtomTextEdit, | 166 | edit: &AtomTextEdit, |
167 | ) -> Vec<SyntaxError> { | 167 | ) -> Vec<SyntaxError> { |
168 | let mut res = Vec::new(); | 168 | let mut res = Vec::new(); |
169 | for e in old_errors { | 169 | |
170 | if e.offset() <= old_range.start() { | 170 | for old_err in old_errors { |
171 | res.push(e) | 171 | let old_err_range = *old_err.range(); |
172 | } else if e.offset() >= old_range.end() { | 172 | // FIXME: make sure that .start() was here previously by a mistake |
173 | res.push(e.add_offset(TextUnit::of_str(&edit.insert), edit.delete.len())); | 173 | if old_err_range.end() <= range_before_reparse.start() { |
174 | res.push(old_err); | ||
175 | } else if old_err_range.start() >= range_before_reparse.end() { | ||
176 | let inserted_len = TextUnit::of_str(&edit.insert); | ||
177 | res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len())); | ||
178 | // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug) | ||
174 | } | 179 | } |
175 | } | 180 | } |
176 | for e in new_errors { | 181 | res.extend(new_errors.into_iter().map(|new_err| { |
177 | res.push(e.add_offset(old_range.start(), 0.into())); | 182 | // fighting borrow checker with a variable ;) |
178 | } | 183 | let offseted_range = *new_err.range() + range_before_reparse.start(); |
184 | new_err.with_range(offseted_range) | ||
185 | })); | ||
179 | res | 186 | res |
180 | } | 187 | } |
181 | 188 | ||
diff --git a/crates/ra_syntax/src/syntax_error.rs b/crates/ra_syntax/src/syntax_error.rs index 7f9d36618..460552103 100644 --- a/crates/ra_syntax/src/syntax_error.rs +++ b/crates/ra_syntax/src/syntax_error.rs | |||
@@ -1,209 +1,47 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! Module that defines `SyntaxError`. |
2 | 2 | ||
3 | use std::fmt; | 3 | use std::fmt; |
4 | 4 | ||
5 | use ra_parser::ParseError; | 5 | use crate::{TextRange, TextUnit}; |
6 | |||
7 | use crate::{validation::EscapeError, TextRange, TextUnit, TokenizeError}; | ||
8 | 6 | ||
7 | /// Represents the result of unsuccessful tokenization, parsing | ||
8 | /// or semantic analysis. | ||
9 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] | 9 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] |
10 | pub struct SyntaxError { | 10 | pub struct SyntaxError(String, TextRange); |
11 | kind: SyntaxErrorKind, | 11 | |
12 | location: Location, | 12 | // FIXME: there was an unused SyntaxErrorKind previously (before this enum was removed) |
13 | } | 13 | // It was introduced in this PR: https://github.com/rust-analyzer/rust-analyzer/pull/846/files#diff-827da9b03b8f9faa1bade5cdd44d5dafR95 |
14 | 14 | // but, by mistake, it was not removed. | |
15 | // FIXME: Location should be just `Location(TextRange)` | 15 | // |
16 | // TextUnit enum member just unnecessarily complicates things, | 16 | // So, we need to find a place where to stick validation for attributes in match clauses. |
17 | // we shouldn't treat it specially, it is just a `TextRange { start: x, end: x + 1 }` | 17 | // Code before refactor: |
18 | // see `location_to_range()` in ra_ide/src/diagnostics | 18 | // InvalidMatchInnerAttr => { |
19 | #[derive(Clone, PartialEq, Eq, Hash)] | 19 | // write!(f, "Inner attributes are only allowed directly after the opening brace of the match expression") |
20 | pub enum Location { | 20 | // } |
21 | Offset(TextUnit), | ||
22 | Range(TextRange), | ||
23 | } | ||
24 | |||
25 | impl From<TextUnit> for Location { | ||
26 | fn from(offset: TextUnit) -> Location { | ||
27 | Location::Offset(offset) | ||
28 | } | ||
29 | } | ||
30 | |||
31 | impl From<TextRange> for Location { | ||
32 | fn from(range: TextRange) -> Location { | ||
33 | Location::Range(range) | ||
34 | } | ||
35 | } | ||
36 | |||
37 | impl fmt::Debug for Location { | ||
38 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||
39 | match self { | ||
40 | Location::Offset(it) => fmt::Debug::fmt(it, f), | ||
41 | Location::Range(it) => fmt::Debug::fmt(it, f), | ||
42 | } | ||
43 | } | ||
44 | } | ||
45 | 21 | ||
46 | impl SyntaxError { | 22 | impl SyntaxError { |
47 | pub fn new<L: Into<Location>>(kind: SyntaxErrorKind, loc: L) -> SyntaxError { | 23 | pub fn new(message: impl Into<String>, range: TextRange) -> Self { |
48 | SyntaxError { kind, location: loc.into() } | 24 | Self(message.into(), range) |
49 | } | 25 | } |
50 | 26 | pub fn new_at_offset(message: impl Into<String>, offset: TextUnit) -> Self { | |
51 | pub fn kind(&self) -> SyntaxErrorKind { | 27 | Self(message.into(), TextRange::offset_len(offset, 1.into())) |
52 | self.kind.clone() | ||
53 | } | 28 | } |
54 | 29 | ||
55 | pub fn location(&self) -> Location { | 30 | pub fn message(&self) -> &str { |
56 | self.location.clone() | 31 | &self.0 |
57 | } | 32 | } |
58 | 33 | pub fn range(&self) -> &TextRange { | |
59 | pub fn offset(&self) -> TextUnit { | 34 | &self.1 |
60 | match self.location { | ||
61 | Location::Offset(offset) => offset, | ||
62 | Location::Range(range) => range.start(), | ||
63 | } | ||
64 | } | 35 | } |
65 | 36 | ||
66 | pub fn add_offset(mut self, plus_offset: TextUnit, minus_offset: TextUnit) -> SyntaxError { | 37 | pub fn with_range(mut self, range: TextRange) -> Self { |
67 | self.location = match self.location { | 38 | self.1 = range; |
68 | Location::Range(range) => Location::Range(range + plus_offset - minus_offset), | ||
69 | Location::Offset(offset) => Location::Offset(offset + plus_offset - minus_offset), | ||
70 | }; | ||
71 | |||
72 | self | 39 | self |
73 | } | 40 | } |
74 | |||
75 | pub fn debug_dump(&self, acc: &mut impl fmt::Write) { | ||
76 | writeln!(acc, "error {:?}: {}", self.location(), self.kind()).unwrap(); | ||
77 | } | ||
78 | } | 41 | } |
79 | 42 | ||
80 | impl fmt::Display for SyntaxError { | 43 | impl fmt::Display for SyntaxError { |
81 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | 44 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
82 | self.kind.fmt(f) | 45 | self.0.fmt(f) |
83 | } | ||
84 | } | ||
85 | |||
86 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] | ||
87 | pub enum SyntaxErrorKind { | ||
88 | ParseError(ParseError), | ||
89 | EscapeError(EscapeError), | ||
90 | TokenizeError(TokenizeError), | ||
91 | // FIXME: the obvious pattern of this enum dictates that the following enum variants | ||
92 | // should be wrapped into something like `SemmanticError(SemmanticError)` | ||
93 | // or `ValidateError(ValidateError)` or `SemmanticValidateError(...)` | ||
94 | InvalidBlockAttr, | ||
95 | InvalidMatchInnerAttr, | ||
96 | InvalidTupleIndexFormat, | ||
97 | VisibilityNotAllowed, | ||
98 | InclusiveRangeMissingEnd, | ||
99 | } | ||
100 | |||
101 | impl fmt::Display for SyntaxErrorKind { | ||
102 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||
103 | use self::SyntaxErrorKind::*; | ||
104 | match self { | ||
105 | InvalidBlockAttr => { | ||
106 | write!(f, "A block in this position cannot accept inner attributes") | ||
107 | } | ||
108 | InvalidMatchInnerAttr => { | ||
109 | write!(f, "Inner attributes are only allowed directly after the opening brace of the match expression") | ||
110 | } | ||
111 | InvalidTupleIndexFormat => { | ||
112 | write!(f, "Tuple (struct) field access is only allowed through decimal integers with no underscores or suffix") | ||
113 | } | ||
114 | ParseError(msg) => write!(f, "{}", msg.0), | ||
115 | EscapeError(err) => write!(f, "{}", err), | ||
116 | TokenizeError(err) => write!(f, "{}", err), | ||
117 | VisibilityNotAllowed => { | ||
118 | write!(f, "unnecessary visibility qualifier") | ||
119 | } | ||
120 | InclusiveRangeMissingEnd => { | ||
121 | write!(f, "An inclusive range must have an end expression") | ||
122 | } | ||
123 | } | ||
124 | } | ||
125 | } | ||
126 | |||
127 | impl fmt::Display for TokenizeError { | ||
128 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||
129 | #[rustfmt::skip] | ||
130 | let msg = match self { | ||
131 | TokenizeError::EmptyInt => { | ||
132 | "Missing digits after the integer base prefix" | ||
133 | } | ||
134 | TokenizeError::EmptyExponent => { | ||
135 | "Missing digits after the exponent symbol" | ||
136 | } | ||
137 | TokenizeError::UnterminatedBlockComment => { | ||
138 | "Missing trailing `*/` symbols to terminate the block comment" | ||
139 | } | ||
140 | TokenizeError::UnterminatedChar => { | ||
141 | "Missing trailing `'` symbol to terminate the character literal" | ||
142 | } | ||
143 | TokenizeError::UnterminatedByte => { | ||
144 | "Missing trailing `'` symbol to terminate the byte literal" | ||
145 | } | ||
146 | TokenizeError::UnterminatedString => { | ||
147 | "Missing trailing `\"` symbol to terminate the string literal" | ||
148 | } | ||
149 | TokenizeError::UnterminatedByteString => { | ||
150 | "Missing trailing `\"` symbol to terminate the byte string literal" | ||
151 | } | ||
152 | TokenizeError::UnterminatedRawString => { | ||
153 | "Missing trailing `\"` with `#` symbols to terminate the raw string literal" | ||
154 | } | ||
155 | TokenizeError::UnterminatedRawByteString => { | ||
156 | "Missing trailing `\"` with `#` symbols to terminate the raw byte string literal" | ||
157 | } | ||
158 | TokenizeError::UnstartedRawString => { | ||
159 | "Missing `\"` symbol after `#` symbols to begin the raw string literal" | ||
160 | } | ||
161 | TokenizeError::UnstartedRawByteString => { | ||
162 | "Missing `\"` symbol after `#` symbols to begin the raw byte string literal" | ||
163 | } | ||
164 | TokenizeError::LifetimeStartsWithNumber => { | ||
165 | "Lifetime name cannot start with a number" | ||
166 | } | ||
167 | }; | ||
168 | write!(f, "{}", msg) | ||
169 | } | ||
170 | } | ||
171 | |||
172 | impl fmt::Display for EscapeError { | ||
173 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { | ||
174 | let msg = match self { | ||
175 | EscapeError::ZeroChars => "Empty literal", | ||
176 | EscapeError::MoreThanOneChar => "Literal should be one character long", | ||
177 | EscapeError::LoneSlash => "Character must be escaped: '\\'", | ||
178 | EscapeError::InvalidEscape => "Invalid escape sequence", | ||
179 | EscapeError::BareCarriageReturn => "Character must be escaped: '\r'", | ||
180 | EscapeError::EscapeOnlyChar => "Character must be escaped", | ||
181 | EscapeError::TooShortHexEscape => "Escape sequence should have two digits", | ||
182 | EscapeError::InvalidCharInHexEscape => "Escape sequence should be a hexadecimal number", | ||
183 | EscapeError::OutOfRangeHexEscape => "Escape sequence should be ASCII", | ||
184 | EscapeError::NoBraceInUnicodeEscape => "Invalid escape sequence", | ||
185 | EscapeError::InvalidCharInUnicodeEscape => "Invalid escape sequence", | ||
186 | EscapeError::EmptyUnicodeEscape => "Invalid escape sequence", | ||
187 | EscapeError::UnclosedUnicodeEscape => "Missing '}'", | ||
188 | EscapeError::LeadingUnderscoreUnicodeEscape => "Invalid escape sequence", | ||
189 | EscapeError::OverlongUnicodeEscape => { | ||
190 | "Unicode escape sequence should have at most 6 digits" | ||
191 | } | ||
192 | EscapeError::LoneSurrogateUnicodeEscape => { | ||
193 | "Unicode escape code should not be a surrogate" | ||
194 | } | ||
195 | EscapeError::OutOfRangeUnicodeEscape => { | ||
196 | "Unicode escape code should be at most 0x10FFFF" | ||
197 | } | ||
198 | EscapeError::UnicodeEscapeInByte => "Unicode escapes are not allowed in bytes", | ||
199 | EscapeError::NonAsciiCharInByte => "Non ASCII characters are not allowed in bytes", | ||
200 | }; | ||
201 | write!(f, "{}", msg) | ||
202 | } | ||
203 | } | ||
204 | |||
205 | impl From<EscapeError> for SyntaxErrorKind { | ||
206 | fn from(err: EscapeError) -> Self { | ||
207 | SyntaxErrorKind::EscapeError(err) | ||
208 | } | 46 | } |
209 | } | 47 | } |
diff --git a/crates/ra_syntax/src/syntax_node.rs b/crates/ra_syntax/src/syntax_node.rs index 7c2b18af3..251668996 100644 --- a/crates/ra_syntax/src/syntax_node.rs +++ b/crates/ra_syntax/src/syntax_node.rs | |||
@@ -6,15 +6,10 @@ | |||
6 | //! The *real* implementation is in the (language-agnostic) `rowan` crate, this | 6 | //! The *real* implementation is in the (language-agnostic) `rowan` crate, this |
7 | //! module just wraps its API. | 7 | //! module just wraps its API. |
8 | 8 | ||
9 | use ra_parser::ParseError; | 9 | pub(crate) use rowan::{GreenNode, GreenToken}; |
10 | use rowan::{GreenNodeBuilder, Language}; | 10 | use rowan::{GreenNodeBuilder, Language}; |
11 | 11 | ||
12 | use crate::{ | 12 | use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextUnit}; |
13 | syntax_error::{SyntaxError, SyntaxErrorKind}, | ||
14 | Parse, SmolStr, SyntaxKind, TextUnit, | ||
15 | }; | ||
16 | |||
17 | pub(crate) use rowan::{GreenNode, GreenToken}; | ||
18 | 13 | ||
19 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] | 14 | #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] |
20 | pub enum RustLanguage {} | 15 | pub enum RustLanguage {} |
@@ -73,8 +68,7 @@ impl SyntaxTreeBuilder { | |||
73 | self.inner.finish_node() | 68 | self.inner.finish_node() |
74 | } | 69 | } |
75 | 70 | ||
76 | pub fn error(&mut self, error: ParseError, text_pos: TextUnit) { | 71 | pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextUnit) { |
77 | let error = SyntaxError::new(SyntaxErrorKind::ParseError(error), text_pos); | 72 | self.errors.push(SyntaxError::new_at_offset(error.0, text_pos)) |
78 | self.errors.push(error) | ||
79 | } | 73 | } |
80 | } | 74 | } |
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs index fb22b9e54..2533d0c44 100644 --- a/crates/ra_syntax/src/tests.rs +++ b/crates/ra_syntax/src/tests.rs | |||
@@ -5,7 +5,7 @@ use std::{ | |||
5 | 5 | ||
6 | use test_utils::{collect_tests, dir_tests, project_dir, read_text}; | 6 | use test_utils::{collect_tests, dir_tests, project_dir, read_text}; |
7 | 7 | ||
8 | use crate::{fuzz, tokenize, Location, SourceFile, SyntaxError, TextRange, Token}; | 8 | use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token}; |
9 | 9 | ||
10 | #[test] | 10 | #[test] |
11 | fn lexer_tests() { | 11 | fn lexer_tests() { |
@@ -128,25 +128,14 @@ fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) | |||
128 | writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap(); | 128 | writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap(); |
129 | } | 129 | } |
130 | for err in errors { | 130 | for err in errors { |
131 | let err_range = location_to_range(err.location()); | ||
132 | writeln!( | 131 | writeln!( |
133 | acc, | 132 | acc, |
134 | "> error{:?} token({:?}) msg({})", | 133 | "> error{:?} token({:?}) msg({})", |
135 | err.location(), | 134 | err.range(), |
136 | &text[err_range], | 135 | &text[*err.range()], |
137 | err.kind() | 136 | err.message() |
138 | ) | 137 | ) |
139 | .unwrap(); | 138 | .unwrap(); |
140 | } | 139 | } |
141 | return acc; | 140 | acc |
142 | |||
143 | // FIXME: copy-pasted this from `ra_ide/src/diagnostics.rs` | ||
144 | // `Location` will be refactored soon in new PR, see todos here: | ||
145 | // https://github.com/rust-analyzer/rust-analyzer/issues/223 | ||
146 | fn location_to_range(location: Location) -> TextRange { | ||
147 | match location { | ||
148 | Location::Offset(offset) => TextRange::offset_len(offset, 1.into()), | ||
149 | Location::Range(range) => range, | ||
150 | } | ||
151 | } | ||
152 | } | 141 | } |
diff --git a/crates/ra_syntax/src/validation.rs b/crates/ra_syntax/src/validation.rs index 8a5f0e4b7..863859dca 100644 --- a/crates/ra_syntax/src/validation.rs +++ b/crates/ra_syntax/src/validation.rs | |||
@@ -5,92 +5,76 @@ mod block; | |||
5 | use rustc_lexer::unescape; | 5 | use rustc_lexer::unescape; |
6 | 6 | ||
7 | use crate::{ | 7 | use crate::{ |
8 | ast, match_ast, AstNode, SyntaxError, SyntaxErrorKind, | 8 | ast, match_ast, AstNode, SyntaxError, |
9 | SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF}, | 9 | SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF}, |
10 | SyntaxNode, SyntaxToken, TextUnit, T, | 10 | SyntaxNode, SyntaxToken, TextUnit, T, |
11 | }; | 11 | }; |
12 | 12 | ||
13 | #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] | 13 | fn rustc_unescape_error_to_string(err: unescape::EscapeError) -> &'static str { |
14 | pub enum EscapeError { | 14 | use unescape::EscapeError as EE; |
15 | ZeroChars, | ||
16 | MoreThanOneChar, | ||
17 | LoneSlash, | ||
18 | InvalidEscape, | ||
19 | BareCarriageReturn, | ||
20 | EscapeOnlyChar, | ||
21 | TooShortHexEscape, | ||
22 | InvalidCharInHexEscape, | ||
23 | OutOfRangeHexEscape, | ||
24 | NoBraceInUnicodeEscape, | ||
25 | InvalidCharInUnicodeEscape, | ||
26 | EmptyUnicodeEscape, | ||
27 | UnclosedUnicodeEscape, | ||
28 | LeadingUnderscoreUnicodeEscape, | ||
29 | OverlongUnicodeEscape, | ||
30 | LoneSurrogateUnicodeEscape, | ||
31 | OutOfRangeUnicodeEscape, | ||
32 | UnicodeEscapeInByte, | ||
33 | NonAsciiCharInByte, | ||
34 | } | ||
35 | 15 | ||
36 | impl From<rustc_lexer::unescape::EscapeError> for EscapeError { | 16 | #[rustfmt::skip] |
37 | fn from(err: rustc_lexer::unescape::EscapeError) -> Self { | 17 | let err_message = match err { |
38 | match err { | 18 | EE::ZeroChars => { |
39 | rustc_lexer::unescape::EscapeError::ZeroChars => EscapeError::ZeroChars, | 19 | "Literal must not be empty" |
40 | rustc_lexer::unescape::EscapeError::MoreThanOneChar => EscapeError::MoreThanOneChar, | ||
41 | rustc_lexer::unescape::EscapeError::LoneSlash => EscapeError::LoneSlash, | ||
42 | rustc_lexer::unescape::EscapeError::InvalidEscape => EscapeError::InvalidEscape, | ||
43 | rustc_lexer::unescape::EscapeError::BareCarriageReturn | ||
44 | | rustc_lexer::unescape::EscapeError::BareCarriageReturnInRawString => { | ||
45 | EscapeError::BareCarriageReturn | ||
46 | } | ||
47 | rustc_lexer::unescape::EscapeError::EscapeOnlyChar => EscapeError::EscapeOnlyChar, | ||
48 | rustc_lexer::unescape::EscapeError::TooShortHexEscape => EscapeError::TooShortHexEscape, | ||
49 | rustc_lexer::unescape::EscapeError::InvalidCharInHexEscape => { | ||
50 | EscapeError::InvalidCharInHexEscape | ||
51 | } | ||
52 | rustc_lexer::unescape::EscapeError::OutOfRangeHexEscape => { | ||
53 | EscapeError::OutOfRangeHexEscape | ||
54 | } | ||
55 | rustc_lexer::unescape::EscapeError::NoBraceInUnicodeEscape => { | ||
56 | EscapeError::NoBraceInUnicodeEscape | ||
57 | } | ||
58 | rustc_lexer::unescape::EscapeError::InvalidCharInUnicodeEscape => { | ||
59 | EscapeError::InvalidCharInUnicodeEscape | ||
60 | } | ||
61 | rustc_lexer::unescape::EscapeError::EmptyUnicodeEscape => { | ||
62 | EscapeError::EmptyUnicodeEscape | ||
63 | } | ||
64 | rustc_lexer::unescape::EscapeError::UnclosedUnicodeEscape => { | ||
65 | EscapeError::UnclosedUnicodeEscape | ||
66 | } | ||
67 | rustc_lexer::unescape::EscapeError::LeadingUnderscoreUnicodeEscape => { | ||
68 | EscapeError::LeadingUnderscoreUnicodeEscape | ||
69 | } | ||
70 | rustc_lexer::unescape::EscapeError::OverlongUnicodeEscape => { | ||
71 | EscapeError::OverlongUnicodeEscape | ||
72 | } | ||
73 | rustc_lexer::unescape::EscapeError::LoneSurrogateUnicodeEscape => { | ||
74 | EscapeError::LoneSurrogateUnicodeEscape | ||
75 | } | ||
76 | rustc_lexer::unescape::EscapeError::OutOfRangeUnicodeEscape => { | ||
77 | EscapeError::OutOfRangeUnicodeEscape | ||
78 | } | ||
79 | rustc_lexer::unescape::EscapeError::UnicodeEscapeInByte => { | ||
80 | EscapeError::UnicodeEscapeInByte | ||
81 | } | ||
82 | rustc_lexer::unescape::EscapeError::NonAsciiCharInByte | ||
83 | | rustc_lexer::unescape::EscapeError::NonAsciiCharInByteString => { | ||
84 | EscapeError::NonAsciiCharInByte | ||
85 | } | ||
86 | } | 20 | } |
87 | } | 21 | EE::MoreThanOneChar => { |
88 | } | 22 | "Literal must be one character long" |
23 | } | ||
24 | EE::LoneSlash => { | ||
25 | "Character must be escaped: `\\`" | ||
26 | } | ||
27 | EE::InvalidEscape => { | ||
28 | "Invalid escape" | ||
29 | } | ||
30 | EE::BareCarriageReturn | EE::BareCarriageReturnInRawString => { | ||
31 | "Character must be escaped: `\r`" | ||
32 | } | ||
33 | EE::EscapeOnlyChar => { | ||
34 | "Escape character `\\` must be escaped itself" | ||
35 | } | ||
36 | EE::TooShortHexEscape => { | ||
37 | "ASCII hex escape code must have exactly two digits" | ||
38 | } | ||
39 | EE::InvalidCharInHexEscape => { | ||
40 | "ASCII hex escape code must contain only hex characters" | ||
41 | } | ||
42 | EE::OutOfRangeHexEscape => { | ||
43 | "ASCII hex escape code must be at most 0x7F" | ||
44 | } | ||
45 | EE::NoBraceInUnicodeEscape => { | ||
46 | "Missing `{` to begin the unicode escape" | ||
47 | } | ||
48 | EE::InvalidCharInUnicodeEscape => { | ||
49 | "Unicode escape must contain only hex characters and underscores" | ||
50 | } | ||
51 | EE::EmptyUnicodeEscape => { | ||
52 | "Unicode escape must not be empty" | ||
53 | } | ||
54 | EE::UnclosedUnicodeEscape => { | ||
55 | "Missing '}' to terminate the unicode escape" | ||
56 | } | ||
57 | EE::LeadingUnderscoreUnicodeEscape => { | ||
58 | "Unicode escape code must not begin with an underscore" | ||
59 | } | ||
60 | EE::OverlongUnicodeEscape => { | ||
61 | "Unicode escape code must have at most 6 digits" | ||
62 | } | ||
63 | EE::LoneSurrogateUnicodeEscape => { | ||
64 | "Unicode escape code must not be a surrogate" | ||
65 | } | ||
66 | EE::OutOfRangeUnicodeEscape => { | ||
67 | "Unicode escape code must be at most 0x10FFFF" | ||
68 | } | ||
69 | EE::UnicodeEscapeInByte => { | ||
70 | "Byte literals must not contain unicode escapes" | ||
71 | } | ||
72 | EE::NonAsciiCharInByte | EE::NonAsciiCharInByteString => { | ||
73 | "Byte literals must not contain non-ASCII characters" | ||
74 | } | ||
75 | }; | ||
89 | 76 | ||
90 | impl From<rustc_lexer::unescape::EscapeError> for SyntaxErrorKind { | 77 | err_message |
91 | fn from(err: rustc_lexer::unescape::EscapeError) -> Self { | ||
92 | SyntaxErrorKind::EscapeError(err.into()) | ||
93 | } | ||
94 | } | 78 | } |
95 | 79 | ||
96 | pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> { | 80 | pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> { |
@@ -118,6 +102,7 @@ pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> { | |||
118 | } | 102 | } |
119 | 103 | ||
120 | fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) { | 104 | fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) { |
105 | // FIXME: move this function to outer scope (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366196658) | ||
121 | fn unquote(text: &str, prefix_len: usize, end_delimiter: char) -> Option<&str> { | 106 | fn unquote(text: &str, prefix_len: usize, end_delimiter: char) -> Option<&str> { |
122 | text.rfind(end_delimiter).and_then(|end| text.get(prefix_len..end)) | 107 | text.rfind(end_delimiter).and_then(|end| text.get(prefix_len..end)) |
123 | } | 108 | } |
@@ -125,9 +110,10 @@ fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) { | |||
125 | let token = literal.token(); | 110 | let token = literal.token(); |
126 | let text = token.text().as_str(); | 111 | let text = token.text().as_str(); |
127 | 112 | ||
113 | // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205) | ||
128 | let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| { | 114 | let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| { |
129 | let off = token.text_range().start() + TextUnit::from_usize(off + prefix_len); | 115 | let off = token.text_range().start() + TextUnit::from_usize(off + prefix_len); |
130 | acc.push(SyntaxError::new(err.into(), off)); | 116 | acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off)); |
131 | }; | 117 | }; |
132 | 118 | ||
133 | match token.kind() { | 119 | match token.kind() { |
@@ -195,7 +181,8 @@ fn validate_numeric_name(name_ref: Option<ast::NameRef>, errors: &mut Vec<Syntax | |||
195 | if let Some(int_token) = int_token(name_ref) { | 181 | if let Some(int_token) = int_token(name_ref) { |
196 | if int_token.text().chars().any(|c| !c.is_digit(10)) { | 182 | if int_token.text().chars().any(|c| !c.is_digit(10)) { |
197 | errors.push(SyntaxError::new( | 183 | errors.push(SyntaxError::new( |
198 | SyntaxErrorKind::InvalidTupleIndexFormat, | 184 | "Tuple (struct) field access is only allowed through \ |
185 | decimal integers with no underscores or suffix", | ||
199 | int_token.text_range(), | 186 | int_token.text_range(), |
200 | )); | 187 | )); |
201 | } | 188 | } |
@@ -215,21 +202,21 @@ fn validate_visibility(vis: ast::Visibility, errors: &mut Vec<SyntaxError>) { | |||
215 | FN_DEF | CONST_DEF | TYPE_ALIAS_DEF => (), | 202 | FN_DEF | CONST_DEF | TYPE_ALIAS_DEF => (), |
216 | _ => return, | 203 | _ => return, |
217 | } | 204 | } |
205 | |||
218 | let impl_block = match parent.parent().and_then(|it| it.parent()).and_then(ast::ImplBlock::cast) | 206 | let impl_block = match parent.parent().and_then(|it| it.parent()).and_then(ast::ImplBlock::cast) |
219 | { | 207 | { |
220 | Some(it) => it, | 208 | Some(it) => it, |
221 | None => return, | 209 | None => return, |
222 | }; | 210 | }; |
223 | if impl_block.target_trait().is_some() { | 211 | if impl_block.target_trait().is_some() { |
224 | errors | 212 | errors.push(SyntaxError::new("Unnecessary visibility qualifier", vis.syntax.text_range())); |
225 | .push(SyntaxError::new(SyntaxErrorKind::VisibilityNotAllowed, vis.syntax.text_range())) | ||
226 | } | 213 | } |
227 | } | 214 | } |
228 | 215 | ||
229 | fn validate_range_expr(expr: ast::RangeExpr, errors: &mut Vec<SyntaxError>) { | 216 | fn validate_range_expr(expr: ast::RangeExpr, errors: &mut Vec<SyntaxError>) { |
230 | if expr.op_kind() == Some(ast::RangeOp::Inclusive) && expr.end().is_none() { | 217 | if expr.op_kind() == Some(ast::RangeOp::Inclusive) && expr.end().is_none() { |
231 | errors.push(SyntaxError::new( | 218 | errors.push(SyntaxError::new( |
232 | SyntaxErrorKind::InclusiveRangeMissingEnd, | 219 | "An inclusive range must have an end expression", |
233 | expr.syntax().text_range(), | 220 | expr.syntax().text_range(), |
234 | )); | 221 | )); |
235 | } | 222 | } |
diff --git a/crates/ra_syntax/src/validation/block.rs b/crates/ra_syntax/src/validation/block.rs index c85bbc1f4..8e962ab5b 100644 --- a/crates/ra_syntax/src/validation/block.rs +++ b/crates/ra_syntax/src/validation/block.rs | |||
@@ -1,9 +1,8 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! Logic for validating block expressions i.e. `ast::BlockExpr`. |
2 | 2 | ||
3 | use crate::{ | 3 | use crate::{ |
4 | ast::{self, AstNode, AttrsOwner}, | 4 | ast::{self, AstNode, AttrsOwner}, |
5 | SyntaxError, | 5 | SyntaxError, |
6 | SyntaxErrorKind::*, | ||
7 | SyntaxKind::*, | 6 | SyntaxKind::*, |
8 | }; | 7 | }; |
9 | 8 | ||
@@ -15,10 +14,11 @@ pub(crate) fn validate_block_expr(expr: ast::BlockExpr, errors: &mut Vec<SyntaxE | |||
15 | } | 14 | } |
16 | } | 15 | } |
17 | if let Some(block) = expr.block() { | 16 | if let Some(block) = expr.block() { |
18 | errors.extend( | 17 | errors.extend(block.attrs().map(|attr| { |
19 | block | 18 | SyntaxError::new( |
20 | .attrs() | 19 | "A block in this position cannot accept inner attributes", |
21 | .map(|attr| SyntaxError::new(InvalidBlockAttr, attr.syntax().text_range())), | 20 | attr.syntax().text_range(), |
22 | ) | 21 | ) |
22 | })) | ||
23 | } | 23 | } |
24 | } | 24 | } |