From ac37a11f04b31f792068a1cb50dbbf5ccd4d982d Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Sun, 26 Jan 2020 20:44:49 +0200
Subject: Reimplemented lexer with vectors instead of iterators

---
 crates/ra_ide/src/references/rename.rs         |  10 +-
 crates/ra_mbe/src/subtree_source.rs            |   8 +-
 crates/ra_syntax/src/lib.rs                    |   2 +-
 crates/ra_syntax/src/parsing.rs                |  10 +-
 crates/ra_syntax/src/parsing/lexer.rs          | 304 ++++++++++++-------------
 crates/ra_syntax/src/parsing/reparsing.rs      |  52 +++--
 crates/ra_syntax/src/parsing/text_tree_sink.rs |   6 +-
 crates/ra_syntax/src/syntax_error.rs           |  42 ++++
 crates/ra_syntax/src/syntax_node.rs            |   9 +-
 crates/ra_syntax/src/tests.rs                  |   3 +-
 10 files changed, 250 insertions(+), 196 deletions(-)

diff --git a/crates/ra_ide/src/references/rename.rs b/crates/ra_ide/src/references/rename.rs
index 626efb603..ad3e86f7c 100644
--- a/crates/ra_ide/src/references/rename.rs
+++ b/crates/ra_ide/src/references/rename.rs
@@ -2,7 +2,7 @@
 use hir::ModuleSource;
 use ra_db::{RelativePath, RelativePathBuf, SourceDatabase, SourceDatabaseExt};
-use ra_syntax::{algo::find_node_at_offset, ast, tokenize, AstNode, SyntaxKind, SyntaxNode};
+use ra_syntax::{algo::find_node_at_offset, ast, single_token, AstNode, SyntaxKind, SyntaxNode};
 use ra_text_edit::TextEdit;
 
 use crate::{
@@ -17,11 +17,9 @@ pub(crate) fn rename(
     position: FilePosition,
     new_name: &str,
 ) -> Option<RangeInfo<SourceChange>> {
-    let tokens = tokenize(new_name);
-    if tokens.len() != 1
-        || (tokens[0].kind != SyntaxKind::IDENT && tokens[0].kind != SyntaxKind::UNDERSCORE)
-    {
-        return None;
+    match single_token(new_name)?.token.kind {
+        SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => (),
+        _ => return None,
     }
 
     let parse = db.parse(position.file_id);
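The hunk above collapses the old "tokenize everything, then count" validation into one call. A minimal sketch of the same check as a free function, assuming only the `single_token` signature and `ParsedToken` shape this patch introduces in `lexer.rs` (the `is_valid_name` helper itself is hypothetical):

    use ra_syntax::{single_token, SyntaxKind};

    /// Hypothetical helper mirroring the `rename` hunk: `text` must lex to
    /// exactly one IDENT or UNDERSCORE token to be a valid rename target.
    fn is_valid_name(text: &str) -> bool {
        match single_token(text) {
            // `single_token` yields `None` for zero tokens and for two or more,
            // so "" and "foo bar" are rejected before the kind check runs.
            Some(parsed) => match parsed.token.kind {
                SyntaxKind::IDENT | SyntaxKind::UNDERSCORE => true,
                _ => false,
            },
            None => false,
        }
    }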
diff --git a/crates/ra_mbe/src/subtree_source.rs b/crates/ra_mbe/src/subtree_source.rs
index b841c39d3..72ac8df03 100644
--- a/crates/ra_mbe/src/subtree_source.rs
+++ b/crates/ra_mbe/src/subtree_source.rs
@@ -1,7 +1,7 @@
 //! FIXME: write short doc here
 
 use ra_parser::{Token, TokenSource};
-use ra_syntax::{classify_literal, SmolStr, SyntaxKind, SyntaxKind::*, T};
+use ra_syntax::{single_token, SmolStr, SyntaxKind, SyntaxKind::*, T};
 use std::cell::{Cell, Ref, RefCell};
 use tt::buffer::{Cursor, TokenBuffer};
 
@@ -129,8 +129,10 @@ fn convert_delim(d: Option<tt::Delimiter>, closing: bool) -> TtToken {
 }
 
 fn convert_literal(l: &tt::Literal) -> TtToken {
-    let kind =
-        classify_literal(&l.text).map(|tkn| tkn.kind).unwrap_or_else(|| match l.text.as_ref() {
+    let kind = single_token(&l.text)
+        .map(|parsed| parsed.token.kind)
+        .filter(|kind| kind.is_literal())
+        .unwrap_or_else(|| match l.text.as_ref() {
             "true" => T![true],
             "false" => T![false],
             _ => panic!("Fail to convert given literal {:#?}", &l),

diff --git a/crates/ra_syntax/src/lib.rs b/crates/ra_syntax/src/lib.rs
index 9931fec84..80b3a0b22 100644
--- a/crates/ra_syntax/src/lib.rs
+++ b/crates/ra_syntax/src/lib.rs
@@ -41,7 +41,7 @@ use crate::syntax_node::GreenNode;
 pub use crate::{
     algo::InsertPosition,
     ast::{AstNode, AstToken},
-    parsing::{classify_literal, tokenize, Token},
+    parsing::{first_token, single_token, tokenize, tokenize_append, Token, TokenizeError},
     ptr::{AstPtr, SyntaxNodePtr},
     syntax_error::{Location, SyntaxError, SyntaxErrorKind},
     syntax_node::{

diff --git a/crates/ra_syntax/src/parsing.rs b/crates/ra_syntax/src/parsing.rs
index 0387f0378..4e51f920b 100644
--- a/crates/ra_syntax/src/parsing.rs
+++ b/crates/ra_syntax/src/parsing.rs
@@ -7,15 +7,17 @@ mod text_tree_sink;
 mod reparsing;
 
 use crate::{syntax_node::GreenNode, SyntaxError};
+use text_token_source::TextTokenSource;
+use text_tree_sink::TextTreeSink;
 
-pub use self::lexer::{classify_literal, tokenize, Token};
+pub use lexer::*;
 
 pub(crate) use self::reparsing::incremental_reparse;
 
 pub(crate) fn parse_text(text: &str) -> (GreenNode, Vec<SyntaxError>) {
-    let tokens = tokenize(&text);
-    let mut token_source = text_token_source::TextTokenSource::new(text, &tokens);
-    let mut tree_sink = text_tree_sink::TextTreeSink::new(text, &tokens);
+    let ParsedTokens { tokens, errors } = tokenize(&text);
+    let mut token_source = TextTokenSource::new(text, &tokens);
+    let mut tree_sink = TextTreeSink::new(text, &tokens, errors);
     ra_parser::parse(&mut token_source, &mut tree_sink);
     tree_sink.finish()
 }

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 9dca7d747..6d96f8400 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -1,10 +1,10 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
-
-use std::iter::{FromIterator, IntoIterator};
+//! It is just a bridge to `rustc_lexer`.
 
 use crate::{
+    SyntaxError, SyntaxErrorKind,
     SyntaxKind::{self, *},
-    TextUnit,
+    TextRange, TextUnit,
 };
 
 /// A token of Rust source.
@@ -15,93 +15,96 @@ pub struct Token {
     /// The length of the token.
     pub len: TextUnit,
 }
-impl Token {
-    pub const fn new(kind: SyntaxKind, len: TextUnit) -> Self {
-        Self { kind, len }
-    }
-}
 
 #[derive(Debug)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing one token. Beware that the token may be malformed.
 pub struct ParsedToken {
     /// Parsed token.
     pub token: Token,
     /// If error is present then parsed token is malformed.
-    pub error: Option<TokenizeError>,
-}
-impl ParsedToken {
-    pub const fn new(token: Token, error: Option<TokenizeError>) -> Self {
-        Self { token, error }
-    }
+    pub error: Option<SyntaxError>,
 }
 
 #[derive(Debug, Default)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing source code of Rust language.
 pub struct ParsedTokens {
-    /// Parsed token.
+    /// Parsed tokens in order they appear in source code.
     pub tokens: Vec<Token>,
-    /// If error is present then parsed token is malformed.
-    pub errors: Vec<TokenizeError>,
+    /// Collection of all occurred tokenization errors.
+    /// In general `self.errors.len() <= self.tokens.len()`.
+    pub errors: Vec<SyntaxError>,
 }
-
-impl FromIterator<ParsedToken> for ParsedTokens {
-    fn from_iter<I: IntoIterator<Item = ParsedToken>>(iter: I) -> Self {
-        let res = Self::default();
-        for entry in iter {
-            res.tokens.push(entry.token);
-            if let Some(error) = entry.error {
-                res.errors.push(error);
-            }
+impl ParsedTokens {
+    /// Append `token` and `error` (if present) to the result.
+    pub fn push(&mut self, ParsedToken { token, error }: ParsedToken) {
+        self.tokens.push(token);
+        if let Some(error) = error {
+            self.errors.push(error)
         }
-        res
     }
 }
 
-/// Returns the first encountered token from the string.
-/// If the string contains zero or two or more tokens returns `None`.
-pub fn single_token(text: &str) -> Option<ParsedToken> {
-    // TODO: test whether this condition indeed checks for a single token
-    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
+/// Same as `tokenize_append()`, just a shortcut for creating `ParsedTokens`
+/// and returning the result the usual way.
+pub fn tokenize(text: &str) -> ParsedTokens {
+    let mut parsed = ParsedTokens::default();
+    tokenize_append(text, &mut parsed);
+    parsed
 }
 
-/*
-/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<TokenizeError>)`
-/// This is just a shorthand for `tokenize(text).collect()`
-pub fn tokenize_to_vec_with_errors(text: &str) -> ParsedTokens {
-    tokenize(text).collect()
-}
+/// Break a string up into its component tokens.
+/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
+/// Beware that it checks for a shebang first, whose length contributes to the
+/// resulting tokens' offsets.
+pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
+    // A non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
+    if text.is_empty() {
+        return;
+    }
 
-/// The simplest version of tokenize, it just retunst a ready-made `Vec<Token>`.
-/// It discards all tokenization errors while parsing. If you need that infromation
-/// consider using `tokenize()` or `tokenize_to_vec_with_errors()`.
-pub fn tokenize_to_vec(text: &str) -> Vec<Token> {
-    tokenize(text).map(|parsed_token| parsed_token.token).collect()
-}
-*/
+    let mut offset: usize = rustc_lexer::strip_shebang(text)
+        .map(|shebang_len| {
+            parsed.tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            shebang_len
+        })
+        .unwrap_or(0);
 
-/// Break a string up into its component tokens
-/// This is the core function, all other `tokenize*()` functions are simply
-/// handy shortcuts for this one.
-pub fn tokenize(text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    let shebang = rustc_lexer::strip_shebang(text).map(|shebang_len| {
-        text = &text[shebang_len..];
-        ParsedToken::new(Token::new(SHEBANG, TextUnit::from_usize(shebang_len)), None)
-    });
+    let text_without_shebang = &text[offset..];
 
-    // Notice that we eagerly evaluate shebang since it may change text slice
-    // and we cannot simplify this into a single method call chain
-    shebang.into_iter().chain(tokenize_without_shebang(text))
+    for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
+        parsed.push(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from_usize(offset)));
+        offset += rustc_token.len;
+    }
 }
 
-pub fn tokenize_without_shebang(text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    rustc_lexer::tokenize(text).map(|rustc_token| {
-        let token_text = &text[..rustc_token.len];
-        text = &text[rustc_token.len..];
-        rustc_token_kind_to_parsed_token(&rustc_token.kind, token_text)
-    })
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero or *two or more* tokens, returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
+pub fn single_token(text: &str) -> Option<ParsedToken> {
+    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
 }
 
-#[derive(Debug)]
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero tokens, returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
+pub fn first_token(text: &str) -> Option<ParsedToken> {
+    // A non-empty string is a precondition of `rustc_lexer::first_token()`.
+    if text.is_empty() {
+        None
+    } else {
+        let rustc_token = rustc_lexer::first_token(text);
+        Some(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from(0)))
+    }
+}
+
+/// Describes the values of the `SyntaxErrorKind::TokenizeError` enum variant.
+/// It describes all the types of errors that may happen during the tokenization
+/// of Rust source.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub enum TokenizeError {
     /// Base prefix was provided, but there were no digits
     /// after it, e.g. `0x`.
@@ -124,94 +127,95 @@ pub enum TokenizeError {
     /// Raw byte string literal lacks trailing delimiter e.g. `"##`
     UnterminatedRawByteString,
 
-    /// Raw string lacks a quote after pound characters e.g. `r###`
+    /// Raw string lacks a quote after the pound characters e.g. `r###`
     UnstartedRawString,
-    /// Raw byte string lacks a quote after pound characters e.g. `br###`
+    /// Raw byte string lacks a quote after the pound characters e.g. `br###`
     UnstartedRawByteString,
 
     /// Lifetime starts with a number e.g. `'4ever`
     LifetimeStartsWithNumber,
 }
 
-fn rustc_token_kind_to_parsed_token(
-    rustc_token_kind: &rustc_lexer::TokenKind,
-    token_text: &str,
+/// Mapper function that converts `rustc_lexer::Token` with some additional context
+/// to `ParsedToken`.
+fn rustc_token_to_parsed_token(
+    rustc_token: &rustc_lexer::Token,
+    text: &str,
+    token_start_offset: TextUnit,
 ) -> ParsedToken {
-    use rustc_lexer::TokenKind as TK;
-    use TokenizeError as TE;
-
     // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
     // being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
-    // would mean hell of a rewrite.
+    // would mean hell of a rewrite
-    let (syntax_kind, error) = match *rustc_token_kind {
-        TK::LineComment => ok(COMMENT),
-        TK::BlockComment { terminated } => ok_if(terminated, COMMENT, TE::UnterminatedBlockComment),
-        TK::Whitespace => ok(WHITESPACE),
-        TK::Ident => ok(if token_text == "_" {
-            UNDERSCORE
-        } else {
-            SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
-        }),
-        TK::RawIdent => ok(IDENT),
-        TK::Literal { kind, .. } => match_literal_kind(&kind),
-        TK::Lifetime { starts_with_number } => {
-            ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+    let token_range =
+        TextRange::offset_len(token_start_offset, TextUnit::from_usize(rustc_token.len));
+
+    let token_text = &text[token_range];
+
+    let (syntax_kind, error) = {
+        use rustc_lexer::TokenKind as TK;
+        use TokenizeError as TE;
+
+        match rustc_token.kind {
+            TK::LineComment => ok(COMMENT),
+            TK::BlockComment { terminated } => {
+                ok_if(terminated, COMMENT, TE::UnterminatedBlockComment)
+            }
+            TK::Whitespace => ok(WHITESPACE),
+            TK::Ident => ok(if token_text == "_" {
+                UNDERSCORE
+            } else {
+                SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
+            }),
+            TK::RawIdent => ok(IDENT),
+            TK::Literal { kind, .. } => match_literal_kind(&kind),
+            TK::Lifetime { starts_with_number } => {
+                ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+            }
+            TK::Semi => ok(SEMI),
+            TK::Comma => ok(COMMA),
+            TK::Dot => ok(DOT),
+            TK::OpenParen => ok(L_PAREN),
+            TK::CloseParen => ok(R_PAREN),
+            TK::OpenBrace => ok(L_CURLY),
+            TK::CloseBrace => ok(R_CURLY),
+            TK::OpenBracket => ok(L_BRACK),
+            TK::CloseBracket => ok(R_BRACK),
+            TK::At => ok(AT),
+            TK::Pound => ok(POUND),
+            TK::Tilde => ok(TILDE),
+            TK::Question => ok(QUESTION),
+            TK::Colon => ok(COLON),
+            TK::Dollar => ok(DOLLAR),
+            TK::Eq => ok(EQ),
+            TK::Not => ok(EXCL),
+            TK::Lt => ok(L_ANGLE),
+            TK::Gt => ok(R_ANGLE),
+            TK::Minus => ok(MINUS),
+            TK::And => ok(AMP),
+            TK::Or => ok(PIPE),
+            TK::Plus => ok(PLUS),
+            TK::Star => ok(STAR),
+            TK::Slash => ok(SLASH),
+            TK::Caret => ok(CARET),
+            TK::Percent => ok(PERCENT),
+            TK::Unknown => ok(ERROR),
         }
-        TK::Semi => ok(SEMI),
-        TK::Comma => ok(COMMA),
-        TK::Dot => ok(DOT),
-        TK::OpenParen => ok(L_PAREN),
-        TK::CloseParen => ok(R_PAREN),
-        TK::OpenBrace => ok(L_CURLY),
-        TK::CloseBrace => ok(R_CURLY),
-        TK::OpenBracket => ok(L_BRACK),
-        TK::CloseBracket => ok(R_BRACK),
-        TK::At => ok(AT),
-        TK::Pound => ok(POUND),
-        TK::Tilde => ok(TILDE),
-        TK::Question => ok(QUESTION),
-        TK::Colon => ok(COLON),
-        TK::Dollar => ok(DOLLAR),
-        TK::Eq => ok(EQ),
-        TK::Not => ok(EXCL),
-        TK::Lt => ok(L_ANGLE),
-        TK::Gt => ok(R_ANGLE),
-        TK::Minus => ok(MINUS),
-        TK::And => ok(AMP),
-        TK::Or => ok(PIPE),
-        TK::Plus => ok(PLUS),
-        TK::Star => ok(STAR),
-        TK::Slash => ok(SLASH),
-        TK::Caret => ok(CARET),
-        TK::Percent => ok(PERCENT),
-        TK::Unknown => ok(ERROR),
     };
 
-    return ParsedToken::new(
-        Token::new(syntax_kind, TextUnit::from_usize(token_text.len())),
-        error,
-    );
+    return ParsedToken {
+        token: Token { kind: syntax_kind, len: token_range.len() },
+        error: error
+            .map(|error| SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)),
+    };
 
     type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
 
-    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
-        (syntax_kind, None)
-    }
-    const fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        if cond {
-            ok(syntax_kind)
-        } else {
-            err(syntax_kind, error)
-        }
-    }
-    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        (syntax_kind, Some(error))
-    }
-
-    const fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
+    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
         use rustc_lexer::LiteralKind as LK;
+        use TokenizeError as TE;
+
         match *kind {
             LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
             LK::Float { empty_exponent, .. } => {
@@ -237,27 +241,17 @@ fn rustc_token_kind_to_parsed_token(
             }
         }
     }
-}
-
-pub fn first_token(text: &str) -> Option<ParsedToken> {
-    // Checking for emptyness because of `rustc_lexer::first_token()` invariant (see its body)
-    if text.is_empty() {
-        None
-    } else {
-        let rustc_token = rustc_lexer::first_token(text);
-        Some(rustc_token_kind_to_parsed_token(&rustc_token.kind, &text[..rustc_token.len]))
+    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
+        (syntax_kind, None)
     }
-}
-
-// TODO: think what to do with this ad hoc function
-pub fn classify_literal(text: &str) -> Option<ParsedToken> {
-    let t = rustc_lexer::first_token(text);
-    if t.len != text.len() {
-        return None;
+    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        (syntax_kind, Some(error))
+    }
+    fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        if cond {
+            ok(syntax_kind)
+        } else {
+            err(syntax_kind, error)
+        }
     }
-    let kind = match t.kind {
-        rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
-        _ => return None,
-    };
-    Some(ParsedToken::new(Token::new(kind, TextUnit::from_usize(t.len))))
 }
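Taken together, the new `lexer.rs` entry points trade the old lazy iterator for two plain vectors that travel with the text. A consumer-side sketch, assuming only the `tokenize`, `ParsedTokens`, and `Token` definitions above (the `dump_tokens_with_offsets` helper is hypothetical):

    use ra_syntax::{tokenize, ParsedTokens};

    fn dump_tokens_with_offsets(text: &str) {
        let ParsedTokens { tokens, errors } = tokenize(text);

        // Offsets are reconstructed by summing token lengths; a leading shebang
        // becomes an ordinary SHEBANG token whose length shifts everything after it.
        let mut offset = 0;
        for token in &tokens {
            let len = token.len.to_usize();
            println!("{:?}@{}..{}", token.kind, offset, offset + len);
            offset += len;
        }

        // Malformed input still yields tokens of the right length; the problems
        // are reported separately (assuming `SyntaxError`'s `Display` impl).
        for error in &errors {
            eprintln!("error: {}", error);
        }
    }

Unlike the discarded-error paths of the old iterator API, nothing is lost here: a malformed literal still contributes a token with the correct length, which is what the reparser and the tree sink below rely on.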
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs
index 3abc09877..ad1a7c855 100644
--- a/crates/ra_syntax/src/parsing/reparsing.rs
+++ b/crates/ra_syntax/src/parsing/reparsing.rs
@@ -12,7 +12,7 @@ use ra_text_edit::AtomTextEdit;
 use crate::{
     algo,
     parsing::{
-        lexer::{tokenize, Token},
+        lexer::{single_token, tokenize, ParsedTokens, Token},
         text_token_source::TextTokenSource,
         text_tree_sink::TextTreeSink,
     },
@@ -41,36 +41,42 @@ fn reparse_token<'node>(
     root: &'node SyntaxNode,
     edit: &AtomTextEdit,
 ) -> Option<(GreenNode, TextRange)> {
-    let token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
-    match token.kind() {
+    let prev_token = algo::find_covering_element(root, edit.delete).as_token()?.clone();
+    let prev_token_kind = prev_token.kind();
+    match prev_token_kind {
         WHITESPACE | COMMENT | IDENT | STRING | RAW_STRING => {
-            if token.kind() == WHITESPACE || token.kind() == COMMENT {
+            if prev_token_kind == WHITESPACE || prev_token_kind == COMMENT {
                 // removing a new line may extend the previous token
-                if token.text()[edit.delete - token.text_range().start()].contains('\n') {
+                let deleted_range = edit.delete - prev_token.text_range().start();
+                if prev_token.text()[deleted_range].contains('\n') {
                     return None;
                 }
             }
 
-            let text = get_text_after_edit(token.clone().into(), &edit);
-            let lex_tokens = tokenize(&text);
-            let lex_token = match lex_tokens[..] {
-                [lex_token] if lex_token.kind == token.kind() => lex_token,
-                _ => return None,
-            };
+            let mut new_text = get_text_after_edit(prev_token.clone().into(), &edit);
+            let new_token_kind = single_token(&new_text)?.token.kind;
 
-            if lex_token.kind == IDENT && is_contextual_kw(&text) {
+            if new_token_kind != prev_token_kind
+                || (new_token_kind == IDENT && is_contextual_kw(&new_text))
+            {
                 return None;
             }
 
-            if let Some(next_char) = root.text().char_at(token.text_range().end()) {
-                let tokens_with_next_char = tokenize(&format!("{}{}", text, next_char));
-                if tokens_with_next_char.len() == 1 {
+            // Check that the edited token is not part of a bigger token.
+            // E.g. if for source code `bruh"str"` the user removed `ruh`, then
+            // `b` no longer remains an identifier, but becomes part of a byte string literal.
+            if let Some(next_char) = root.text().char_at(prev_token.text_range().end()) {
+                new_text.push(next_char);
+                let token_with_next_char = single_token(&new_text);
+                if token_with_next_char.is_some() {
                     return None;
                 }
+                new_text.pop();
             }
 
-            let new_token = GreenToken::new(rowan::SyntaxKind(token.kind().into()), text.into());
-            Some((token.replace_with(new_token), token.text_range()))
+            let new_token =
+                GreenToken::new(rowan::SyntaxKind(prev_token_kind.into()), new_text.into());
+            Some((prev_token.replace_with(new_token), prev_token.text_range()))
         }
         _ => None,
     }
@@ -82,12 +88,12 @@ fn reparse_block<'node>(
 ) -> Option<(GreenNode, Vec<SyntaxError>, TextRange)> {
     let (node, reparser) = find_reparsable_node(root, edit.delete)?;
     let text = get_text_after_edit(node.clone().into(), &edit);
-    let tokens = tokenize(&text);
+    let ParsedTokens { tokens, errors } = tokenize(&text);
     if !is_balanced(&tokens) {
         return None;
     }
     let mut token_source = TextTokenSource::new(&text, &tokens);
-    let mut tree_sink = TextTreeSink::new(&text, &tokens);
+    let mut tree_sink = TextTreeSink::new(&text, &tokens, errors);
     reparser.parse(&mut token_source, &mut tree_sink);
     let (green, new_errors) = tree_sink.finish();
     Some((node.replace_with(green), new_errors, node.text_range()))
@@ -96,6 +102,9 @@
 fn get_text_after_edit(element: SyntaxElement, edit: &AtomTextEdit) -> String {
     let edit =
         AtomTextEdit::replace(edit.delete - element.text_range().start(), edit.insert.clone());
+
+    // Note: we could move this match to a method or even further: use enum_dispatch crate
+    // https://crates.io/crates/enum_dispatch
     let text = match element {
         NodeOrToken::Token(token) => token.text().to_string(),
         NodeOrToken::Node(node) => node.text().to_string(),
@@ -112,6 +121,9 @@ fn is_contextual_kw(text: &str) -> bool {
 
 fn find_reparsable_node(node: &SyntaxNode, range: TextRange) -> Option<(SyntaxNode, Reparser)> {
     let node = algo::find_covering_element(node, range);
+
+    // Note: we could move this match to a method or even further: use enum_dispatch crate
+    // https://crates.io/crates/enum_dispatch
     let mut ancestors = match node {
         NodeOrToken::Token(it) => it.parent().ancestors(),
         NodeOrToken::Node(it) => it.ancestors(),
@@ -181,6 +193,8 @@ mod tests {
         let fully_reparsed = SourceFile::parse(&after);
         let incrementally_reparsed: Parse<SourceFile> = {
             let f = SourceFile::parse(&before);
+            // FIXME: it seems this initialization statement is unnecessary (see `edit` in the outer scope)
+            // Investigate whether it should really be removed.
             let edit = AtomTextEdit { delete: range, insert: replace_with.to_string() };
             let (green, new_errors, range) =
                 incremental_reparse(f.tree().syntax(), &edit, f.errors.to_vec()).unwrap();
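The `next_char` guard in `reparse_token` above is the subtlest part of this change, so a worked example may help. A hypothetical test, assuming the `single_token` semantics defined in `lexer.rs` (where it would live and its exact path are illustrative):

    #[test]
    fn single_token_reparse_boundary() {
        use crate::parsing::lexer::single_token;

        // Deleting `ruh` from `bruh"str"` leaves the edited token text `b`,
        // which on its own still lexes as a single IDENT...
        assert!(single_token("b").is_some());

        // ...but glued to the next character `"` it lexes as ONE (unterminated)
        // byte-string literal rather than IDENT followed by a string.
        // `single_token` returning `Some` here is exactly why `reparse_token`
        // bails out and falls back to block reparsing.
        assert!(single_token("b\"").is_some());

        // When the edit truly stays inside one token, the glued text splits in
        // two, `single_token` returns `None`, and single-token reparse proceeds.
        assert!(single_token("foo;").is_none());
    }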
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs
index c36756d6c..5faac588b 100644
--- a/crates/ra_syntax/src/parsing/text_tree_sink.rs
+++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs
@@ -92,14 +92,14 @@ impl<'a> TreeSink for TextTreeSink<'a> {
 }
 
 impl<'a> TextTreeSink<'a> {
-    pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> TextTreeSink<'a> {
-        TextTreeSink {
+    pub(super) fn new(text: &'a str, tokens: &'a [Token], errors: Vec<SyntaxError>) -> Self {
+        Self {
             text,
             tokens,
             text_pos: 0.into(),
             token_pos: 0,
             state: State::PendingStart,
-            inner: SyntaxTreeBuilder::default(),
+            inner: SyntaxTreeBuilder::new(errors),
         }
     }

diff --git a/crates/ra_syntax/src/syntax_error.rs b/crates/ra_syntax/src/syntax_error.rs
index 9122dda29..af18a30f2 100644
--- a/crates/ra_syntax/src/syntax_error.rs
+++ b/crates/ra_syntax/src/syntax_error.rs
@@ -84,6 +84,9 @@ pub enum SyntaxErrorKind {
     ParseError(ParseError),
     EscapeError(EscapeError),
     TokenizeError(TokenizeError),
+    // FIXME: the obvious pattern of this enum dictates that the following enum variants
+    // should be wrapped into something like `SemanticError(SemanticError)`
+    // or `ValidateError(ValidateError)` or `SemanticValidateError(...)`
     InvalidBlockAttr,
     InvalidMatchInnerAttr,
     InvalidTupleIndexFormat,
@@ -106,6 +109,7 @@ impl fmt::Display for SyntaxErrorKind {
             }
             ParseError(msg) => write!(f, "{}", msg.0),
             EscapeError(err) => write!(f, "{}", err),
+            TokenizeError(err) => write!(f, "{}", err),
             VisibilityNotAllowed => {
                 write!(f, "unnecessary visibility qualifier")
             }
         }
     }
 }
 
+impl fmt::Display for TokenizeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let msg = match self {
+            TokenizeError::EmptyInt => "Missing digits after integer base prefix",
+            TokenizeError::EmptyExponent => "Missing digits after the exponent symbol",
+            TokenizeError::UnterminatedBlockComment => {
+                "Missing trailing `*/` symbols to terminate the block comment"
+            }
+            TokenizeError::UnterminatedChar => {
+                "Missing trailing `'` symbol to terminate the character literal"
+            }
+            TokenizeError::UnterminatedByte => {
+                "Missing trailing `'` symbol to terminate the byte literal"
+            }
+            TokenizeError::UnterminatedString => {
+                "Missing trailing `\"` symbol to terminate the string literal"
+            }
+            TokenizeError::UnterminatedByteString => {
+                "Missing trailing `\"` symbol to terminate the byte string literal"
+            }
+            TokenizeError::UnterminatedRawString => {
+                "Missing trailing `\"` with `#` symbols to terminate the raw string literal"
+            }
+            TokenizeError::UnterminatedRawByteString => {
+                "Missing trailing `\"` with `#` symbols to terminate the raw byte string literal"
+            }
+            TokenizeError::UnstartedRawString => {
+                "Missing `\"` symbol after `#` symbols to begin the raw string literal"
+            }
+            TokenizeError::UnstartedRawByteString => {
+                "Missing `\"` symbol after `#` symbols to begin the raw byte string literal"
+            }
+            TokenizeError::LifetimeStartsWithNumber => "Lifetime name cannot start with a number",
+        };
+        write!(f, "{}", msg)
+    }
+}
+
 impl fmt::Display for EscapeError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let msg = match self {
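With this `Display` implementation, lexer errors finally surface as readable diagnostics instead of being dropped. A small sketch of the observable behavior, assuming the `tokenize` API from `lexer.rs` above (the assertion style is illustrative):

    use ra_syntax::tokenize;

    fn main() {
        // `0x` lexes as a single INT_NUMBER token with an `EmptyInt` error attached.
        let parsed = tokenize("0x");
        assert_eq!(parsed.tokens.len(), 1);
        assert_eq!(parsed.errors.len(), 1);

        // Routed through `SyntaxErrorKind::TokenizeError`, this prints:
        // "Missing digits after integer base prefix"
        println!("{}", parsed.errors[0]);
    }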
diff --git a/crates/ra_syntax/src/syntax_node.rs b/crates/ra_syntax/src/syntax_node.rs
index b3eb5da63..591855302 100644
--- a/crates/ra_syntax/src/syntax_node.rs
+++ b/crates/ra_syntax/src/syntax_node.rs
@@ -4,7 +4,7 @@
 //! `SyntaxNode`, and a basic traversal API (parent, children, siblings).
 //!
 //! The *real* implementation is in the (language-agnostic) `rowan` crate, this
-//! modules just wraps its API.
+//! module just wraps its API.
 
 use ra_parser::ParseError;
 use rowan::{GreenNodeBuilder, Language};
@@ -38,14 +38,15 @@
 pub type SyntaxElementChildren = rowan::SyntaxElementChildren<RustLanguage>;
 
 pub use rowan::{Direction, NodeOrToken};
 
+#[derive(Default)]
 pub struct SyntaxTreeBuilder {
     errors: Vec<SyntaxError>,
     inner: GreenNodeBuilder<'static>,
 }
 
-impl Default for SyntaxTreeBuilder {
-    fn default() -> SyntaxTreeBuilder {
-        SyntaxTreeBuilder { errors: Vec::new(), inner: GreenNodeBuilder::new() }
+impl SyntaxTreeBuilder {
+    pub fn new(errors: Vec<SyntaxError>) -> Self {
+        Self { errors, inner: GreenNodeBuilder::default() }
     }
 }

diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 458920607..df21c957c 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -10,7 +10,8 @@ use crate::{fuzz, SourceFile};
 #[test]
 fn lexer_tests() {
     dir_tests(&test_data_dir(), &["lexer"], |text, _| {
-        let tokens = crate::tokenize(text);
+        // FIXME: add tests for errors (their format is up to discussion)
+        let tokens = crate::tokenize(text).tokens;
         dump_tokens(&tokens, text)
     })
 }
-- 
cgit v1.2.3