From ad24976da38482948c586bdbc16004273662ff7e Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Fri, 24 Jan 2020 03:39:23 +0200
Subject: ra_syntax: changed added diagnostics information returned from
 tokenize() (implemented with iterators)

---
 crates/ra_syntax/src/parsing/lexer.rs | 299 ++++++++++++++++++++++++++--------
 1 file changed, 229 insertions(+), 70 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 6d839208d..9dca7d747 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -1,4 +1,6 @@
-//! FIXME: write short doc here
+//! Lexer analyzes raw input string and produces lexemes (tokens).
+
+use std::iter::{FromIterator, IntoIterator};
 
 use crate::{
     SyntaxKind::{self, *},
@@ -13,85 +15,242 @@ pub struct Token {
     /// The length of the token.
     pub len: TextUnit,
 }
+impl Token {
+    pub const fn new(kind: SyntaxKind, len: TextUnit) -> Self {
+        Self { kind, len }
+    }
+}
 
-fn match_literal_kind(kind: rustc_lexer::LiteralKind) -> SyntaxKind {
-    match kind {
-        rustc_lexer::LiteralKind::Int { .. } => INT_NUMBER,
-        rustc_lexer::LiteralKind::Float { .. } => FLOAT_NUMBER,
-        rustc_lexer::LiteralKind::Char { .. } => CHAR,
-        rustc_lexer::LiteralKind::Byte { .. } => BYTE,
-        rustc_lexer::LiteralKind::Str { .. } => STRING,
-        rustc_lexer::LiteralKind::ByteStr { .. } => BYTE_STRING,
-        rustc_lexer::LiteralKind::RawStr { .. } => RAW_STRING,
-        rustc_lexer::LiteralKind::RawByteStr { .. } => RAW_BYTE_STRING,
+#[derive(Debug)]
+/// Represents the result of parsing one token.
+pub struct ParsedToken {
+    /// Parsed token.
+    pub token: Token,
+    /// If error is present then parsed token is malformed.
+    pub error: Option<TokenizeError>,
+}
+impl ParsedToken {
+    pub const fn new(token: Token, error: Option<TokenizeError>) -> Self {
+        Self { token, error }
     }
 }
+#[derive(Debug, Default)]
+/// Represents the result of parsing one token.
+pub struct ParsedTokens {
+    /// Parsed token.
+    pub tokens: Vec<Token>,
+    /// If error is present then parsed token is malformed.
+    pub errors: Vec<TokenizeError>,
+}
+
+impl FromIterator<ParsedToken> for ParsedTokens {
+    fn from_iter<I: IntoIterator<Item = ParsedToken>>(iter: I) -> Self {
+        let mut res = Self::default();
+        for entry in iter {
+            res.tokens.push(entry.token);
+            if let Some(error) = entry.error {
+                res.errors.push(error);
+            }
+        }
+        res
+    }
+}
+
+/// Returns the first encountered token from the string.
+/// If the string contains zero or two or more tokens returns `None`.
+pub fn single_token(text: &str) -> Option<ParsedToken> {
+    // TODO: test whether this condition indeed checks for a single token
+    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
+}
+
+/*
+/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<TokenizeError>)`
+/// This is just a shorthand for `tokenize(text).collect()`
+pub fn tokenize_to_vec_with_errors(text: &str) -> ParsedTokens {
+    tokenize(text).collect()
+}
+
+/// The simplest version of tokenize, it just returns a ready-made `Vec<Token>`.
+/// It discards all tokenization errors while parsing. If you need that information
+/// consider using `tokenize()` or `tokenize_to_vec_with_errors()`.
+pub fn tokenize_to_vec(text: &str) -> Vec<Token> {
+    tokenize(text).map(|parsed_token| parsed_token.token).collect()
+}
+*/
+
 /// Break a string up into its component tokens
-pub fn tokenize(text: &str) -> Vec<Token> {
-    if text.is_empty() {
-        return vec![];
+/// This is the core function, all other `tokenize*()` functions are simply
+/// handy shortcuts for this one.
+pub fn tokenize(mut text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
+    let shebang = rustc_lexer::strip_shebang(text).map(|shebang_len| {
+        text = &text[shebang_len..];
+        ParsedToken::new(Token::new(SHEBANG, TextUnit::from_usize(shebang_len)), None)
+    });
+
+    // Notice that we eagerly evaluate shebang since it may change text slice
+    // and we cannot simplify this into a single method call chain
+    shebang.into_iter().chain(tokenize_without_shebang(text))
+}
+
+pub fn tokenize_without_shebang(mut text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
+    rustc_lexer::tokenize(text).map(move |rustc_token| {
+        let token_text = &text[..rustc_token.len];
+        text = &text[rustc_token.len..];
+        rustc_token_kind_to_parsed_token(&rustc_token.kind, token_text)
+    })
+}
+
+#[derive(Debug)]
+pub enum TokenizeError {
+    /// Base prefix was provided, but there were no digits
+    /// after it, e.g. `0x`.
+    EmptyInt,
+    /// Float exponent lacks digits e.g. `e+`, `E+`, `e-`, `E-`,
+    EmptyExponent,
+
+    /// Block comment lacks trailing delimiter `*/`
+    UnterminatedBlockComment,
+    /// Character literal lacks trailing delimiter `'`
+    UnterminatedChar,
+    /// Characterish byte literal lacks trailing delimiter `'`
+    UnterminatedByte,
+    /// String literal lacks trailing delimiter `"`
+    UnterminatedString,
+    /// Byte string literal lacks trailing delimiter `"`
+    UnterminatedByteString,
+    /// Raw literal lacks trailing delimiter e.g. `"##`
+    UnterminatedRawString,
+    /// Raw byte string literal lacks trailing delimiter e.g. `"##`
+    UnterminatedRawByteString,
+
+    /// Raw string lacks a quote after pound characters e.g. `r###`
+    UnstartedRawString,
+    /// Raw byte string lacks a quote after pound characters e.g. `br###`
+    UnstartedRawByteString,
+
+    /// Lifetime starts with a number e.g. `'4ever`
+    LifetimeStartsWithNumber,
+}
+
+fn rustc_token_kind_to_parsed_token(
+    rustc_token_kind: &rustc_lexer::TokenKind,
+    token_text: &str,
+) -> ParsedToken {
+    use rustc_lexer::TokenKind as TK;
+    use TokenizeError as TE;
+
+    // We drop some useful information here (see patterns with double dots `..`)
+    // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
+    // being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
+    // would mean hell of a rewrite.
+
+    let (syntax_kind, error) = match *rustc_token_kind {
+        TK::LineComment => ok(COMMENT),
+        TK::BlockComment { terminated } => ok_if(terminated, COMMENT, TE::UnterminatedBlockComment),
+        TK::Whitespace => ok(WHITESPACE),
+        TK::Ident => ok(if token_text == "_" {
+            UNDERSCORE
+        } else {
+            SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
+        }),
+        TK::RawIdent => ok(IDENT),
+        TK::Literal { kind, .. } => match_literal_kind(&kind),
+        TK::Lifetime { starts_with_number } => {
+            ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+        }
+        TK::Semi => ok(SEMI),
+        TK::Comma => ok(COMMA),
+        TK::Dot => ok(DOT),
+        TK::OpenParen => ok(L_PAREN),
+        TK::CloseParen => ok(R_PAREN),
+        TK::OpenBrace => ok(L_CURLY),
+        TK::CloseBrace => ok(R_CURLY),
+        TK::OpenBracket => ok(L_BRACK),
+        TK::CloseBracket => ok(R_BRACK),
+        TK::At => ok(AT),
+        TK::Pound => ok(POUND),
+        TK::Tilde => ok(TILDE),
+        TK::Question => ok(QUESTION),
+        TK::Colon => ok(COLON),
+        TK::Dollar => ok(DOLLAR),
+        TK::Eq => ok(EQ),
+        TK::Not => ok(EXCL),
+        TK::Lt => ok(L_ANGLE),
+        TK::Gt => ok(R_ANGLE),
+        TK::Minus => ok(MINUS),
+        TK::And => ok(AMP),
+        TK::Or => ok(PIPE),
+        TK::Plus => ok(PLUS),
+        TK::Star => ok(STAR),
+        TK::Slash => ok(SLASH),
+        TK::Caret => ok(CARET),
+        TK::Percent => ok(PERCENT),
+        TK::Unknown => ok(ERROR),
+    };
+
+    return ParsedToken::new(
+        Token::new(syntax_kind, TextUnit::from_usize(token_text.len())),
+        error,
+    );
+
+    type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
+
+    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
+        (syntax_kind, None)
     }
-    let mut text = text;
-    let mut acc = Vec::new();
-    if let Some(len) = rustc_lexer::strip_shebang(text) {
-        acc.push(Token { kind: SHEBANG, len: TextUnit::from_usize(len) });
-        text = &text[len..];
+    const fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        if cond {
+            ok(syntax_kind)
+        } else {
+            err(syntax_kind, error)
+        }
     }
-    while !text.is_empty() {
-        let rustc_token = rustc_lexer::first_token(text);
-        let kind = match rustc_token.kind {
-            rustc_lexer::TokenKind::LineComment => COMMENT,
-            rustc_lexer::TokenKind::BlockComment { .. } => COMMENT,
-            rustc_lexer::TokenKind::Whitespace => WHITESPACE,
-            rustc_lexer::TokenKind::Ident => {
-                let token_text = &text[..rustc_token.len];
-                if token_text == "_" {
-                    UNDERSCORE
-                } else {
-                    SyntaxKind::from_keyword(&text[..rustc_token.len]).unwrap_or(IDENT)
-                }
+    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        (syntax_kind, Some(error))
+    }
+
+    const fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
+        use rustc_lexer::LiteralKind as LK;
+        match *kind {
+            LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
+            LK::Float { empty_exponent, .. } => {
+                ok_if(!empty_exponent, FLOAT_NUMBER, TE::EmptyExponent)
             }
-            rustc_lexer::TokenKind::RawIdent => IDENT,
-            rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
-            rustc_lexer::TokenKind::Lifetime { .. } => LIFETIME,
-            rustc_lexer::TokenKind::Semi => SEMI,
-            rustc_lexer::TokenKind::Comma => COMMA,
-            rustc_lexer::TokenKind::Dot => DOT,
-            rustc_lexer::TokenKind::OpenParen => L_PAREN,
-            rustc_lexer::TokenKind::CloseParen => R_PAREN,
-            rustc_lexer::TokenKind::OpenBrace => L_CURLY,
-            rustc_lexer::TokenKind::CloseBrace => R_CURLY,
-            rustc_lexer::TokenKind::OpenBracket => L_BRACK,
-            rustc_lexer::TokenKind::CloseBracket => R_BRACK,
-            rustc_lexer::TokenKind::At => AT,
-            rustc_lexer::TokenKind::Pound => POUND,
-            rustc_lexer::TokenKind::Tilde => TILDE,
-            rustc_lexer::TokenKind::Question => QUESTION,
-            rustc_lexer::TokenKind::Colon => COLON,
-            rustc_lexer::TokenKind::Dollar => DOLLAR,
-            rustc_lexer::TokenKind::Eq => EQ,
-            rustc_lexer::TokenKind::Not => EXCL,
-            rustc_lexer::TokenKind::Lt => L_ANGLE,
-            rustc_lexer::TokenKind::Gt => R_ANGLE,
-            rustc_lexer::TokenKind::Minus => MINUS,
-            rustc_lexer::TokenKind::And => AMP,
-            rustc_lexer::TokenKind::Or => PIPE,
-            rustc_lexer::TokenKind::Plus => PLUS,
-            rustc_lexer::TokenKind::Star => STAR,
-            rustc_lexer::TokenKind::Slash => SLASH,
-            rustc_lexer::TokenKind::Caret => CARET,
-            rustc_lexer::TokenKind::Percent => PERCENT,
-            rustc_lexer::TokenKind::Unknown => ERROR,
-        };
-        let token = Token { kind, len: TextUnit::from_usize(rustc_token.len) };
-        acc.push(token);
-        text = &text[rustc_token.len..];
+            LK::Char { terminated } => ok_if(terminated, CHAR, TE::UnterminatedChar),
+            LK::Byte { terminated } => ok_if(terminated, BYTE, TE::UnterminatedByte),
+            LK::Str { terminated } => ok_if(terminated, STRING, TE::UnterminatedString),
+            LK::ByteStr { terminated } => {
+                ok_if(terminated, BYTE_STRING, TE::UnterminatedByteString)
+            }
+
+            LK::RawStr { started: true, terminated, .. } => {
+                ok_if(terminated, RAW_STRING, TE::UnterminatedRawString)
+            }
+            LK::RawStr { started: false, .. } => err(RAW_STRING, TE::UnstartedRawString),
+
+            LK::RawByteStr { started: true, terminated, .. } => {
+                ok_if(terminated, RAW_BYTE_STRING, TE::UnterminatedRawByteString)
+            }
+            LK::RawByteStr { started: false, .. } => {
+                err(RAW_BYTE_STRING, TE::UnstartedRawByteString)
+            }
+        }
+    }
+}
+
+pub fn first_token(text: &str) -> Option<ParsedToken> {
+    // Checking for emptiness because of `rustc_lexer::first_token()` invariant (see its body)
+    if text.is_empty() {
+        None
+    } else {
+        let rustc_token = rustc_lexer::first_token(text);
+        Some(rustc_token_kind_to_parsed_token(&rustc_token.kind, &text[..rustc_token.len]))
     }
-    acc
 }
 
-pub fn classify_literal(text: &str) -> Option<Token> {
+// TODO: think what to do with this ad hoc function
+pub fn classify_literal(text: &str) -> Option<ParsedToken> {
     let t = rustc_lexer::first_token(text);
     if t.len != text.len() {
         return None;
@@ -100,5 +259,5 @@ pub fn classify_literal(text: &str) -> Option<ParsedToken> {
         rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
         _ => return None,
     };
-    Some(Token { kind, len: TextUnit::from_usize(t.len) })
+    Some(ParsedToken::new(Token::new(kind, TextUnit::from_usize(t.len)), None))
 }
-- cgit v1.2.3
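
The iterator-based API above is consumed like this (a hypothetical caller sketch, not part of the patch; the `use` path assumes the `lexer` module is reachable as `ra_syntax::parsing::lexer`, which this patch does not itself guarantee):

    use ra_syntax::parsing::lexer::{single_token, tokenize, ParsedTokens};

    fn lex_demo(text: &str) {
        // `tokenize` yields `ParsedToken`s lazily; the `FromIterator` impl
        // above splits them back into parallel token/error vectors.
        let parsed: ParsedTokens = tokenize(text).collect();

        // Token lengths tile the input exactly, so they sum to `text.len()`.
        let total: usize = parsed.tokens.iter().map(|t| t.len.to_usize()).sum();
        assert_eq!(total, text.len());

        // `single_token` succeeds only when the whole input is one lexeme.
        assert!(single_token("struct").is_some());
        assert!(single_token("struct S").is_none());
    }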

From ac37a11f04b31f792068a1cb50dbbf5ccd4d982d Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Sun, 26 Jan 2020 20:44:49 +0200
Subject: Reimplemented lexer with vectors instead of iterators

---
 crates/ra_syntax/src/parsing/lexer.rs | 304 +++++++++++++++++-----------------
 1 file changed, 149 insertions(+), 155 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 9dca7d747..6d96f8400 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -1,10 +1,10 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
-
-use std::iter::{FromIterator, IntoIterator};
+//! It is just a bridge to `rustc_lexer`.
 
 use crate::{
+    SyntaxError, SyntaxErrorKind,
     SyntaxKind::{self, *},
-    TextUnit,
+    TextRange, TextUnit,
 };
 
 /// A token of Rust source.
@@ -15,93 +15,96 @@ pub struct Token {
     /// The length of the token.
     pub len: TextUnit,
 }
-impl Token {
-    pub const fn new(kind: SyntaxKind, len: TextUnit) -> Self {
-        Self { kind, len }
-    }
-}
 
 #[derive(Debug)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing one token. Beware that the token may be malformed.
 pub struct ParsedToken {
     /// Parsed token.
     pub token: Token,
     /// If error is present then parsed token is malformed.
-    pub error: Option<TokenizeError>,
-}
-impl ParsedToken {
-    pub const fn new(token: Token, error: Option<TokenizeError>) -> Self {
-        Self { token, error }
-    }
+    pub error: Option<SyntaxError>,
 }
 
 #[derive(Debug, Default)]
-/// Represents the result of parsing one token.
+/// Represents the result of parsing source code of Rust language.
 pub struct ParsedTokens {
-    /// Parsed token.
+    /// Parsed tokens in order they appear in source code.
     pub tokens: Vec<Token>,
-    /// If error is present then parsed token is malformed.
-    pub errors: Vec<TokenizeError>,
+    /// Collection of all occurred tokenization errors.
+    /// In general `self.errors.len() <= self.tokens.len()`
+    pub errors: Vec<SyntaxError>,
 }
-
-impl FromIterator<ParsedToken> for ParsedTokens {
-    fn from_iter<I: IntoIterator<Item = ParsedToken>>(iter: I) -> Self {
-        let mut res = Self::default();
-        for entry in iter {
-            res.tokens.push(entry.token);
-            if let Some(error) = entry.error {
-                res.errors.push(error);
-            }
+impl ParsedTokens {
+    /// Append `token` and `error` (if present) to the result.
+    pub fn push(&mut self, ParsedToken { token, error }: ParsedToken) {
+        self.tokens.push(token);
+        if let Some(error) = error {
+            self.errors.push(error)
         }
-        res
     }
 }
 
-/// Returns the first encountered token from the string.
-/// If the string contains zero or two or more tokens returns `None`.
-pub fn single_token(text: &str) -> Option<ParsedToken> {
-    // TODO: test whether this condition indeed checks for a single token
-    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
+/// Same as `tokenize_append()`, just a shortcut for creating `ParsedTokens`
+/// and returning the result the usual way.
+pub fn tokenize(text: &str) -> ParsedTokens {
+    let mut parsed = ParsedTokens::default();
+    tokenize_append(text, &mut parsed);
+    parsed
 }
 
-/*
-/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<TokenizeError>)`
-/// This is just a shorthand for `tokenize(text).collect()`
-pub fn tokenize_to_vec_with_errors(text: &str) -> ParsedTokens {
-    tokenize(text).collect()
-}
+/// Break a string up into its component tokens.
+/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
+/// Beware that it checks for shebang first and its length contributes to resulting
+/// tokens offsets.
+pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
+    // non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
+    if text.is_empty() {
+        return;
+    }
 
-/// The simplest version of tokenize, it just returns a ready-made `Vec<Token>`.
-/// It discards all tokenization errors while parsing. If you need that information
-/// consider using `tokenize()` or `tokenize_to_vec_with_errors()`.
-pub fn tokenize_to_vec(text: &str) -> Vec<Token> {
-    tokenize(text).map(|parsed_token| parsed_token.token).collect()
-}
-*/
+    let mut offset: usize = rustc_lexer::strip_shebang(text)
+        .map(|shebang_len| {
+            parsed.tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            shebang_len
+        })
+        .unwrap_or(0);
 
-/// Break a string up into its component tokens
-/// This is the core function, all other `tokenize*()` functions are simply
-/// handy shortcuts for this one.
-pub fn tokenize(mut text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    let shebang = rustc_lexer::strip_shebang(text).map(|shebang_len| {
-        text = &text[shebang_len..];
-        ParsedToken::new(Token::new(SHEBANG, TextUnit::from_usize(shebang_len)), None)
-    });
+    let text_without_shebang = &text[offset..];
 
-    // Notice that we eagerly evaluate shebang since it may change text slice
-    // and we cannot simplify this into a single method call chain
-    shebang.into_iter().chain(tokenize_without_shebang(text))
+    for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
+        parsed.push(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from_usize(offset)));
+        offset += rustc_token.len;
+    }
 }
 
-pub fn tokenize_without_shebang(mut text: &str) -> impl Iterator<Item = ParsedToken> + '_ {
-    rustc_lexer::tokenize(text).map(move |rustc_token| {
-        let token_text = &text[..rustc_token.len];
-        text = &text[rustc_token.len..];
-        rustc_token_kind_to_parsed_token(&rustc_token.kind, token_text)
-    })
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero or *two or more tokens* returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
+pub fn single_token(text: &str) -> Option<ParsedToken> {
+    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
 }
 
-#[derive(Debug)]
+/// Returns the first encountered token at the beginning of the string.
+/// If the string contains zero tokens returns `None`.
+///
+/// The main difference between `first_token()` and `single_token()` is that
+/// the latter returns `None` if the string contains more than one token.
+pub fn first_token(text: &str) -> Option<ParsedToken> {
+    // non-empty string is a precondition of `rustc_lexer::first_token()`.
+    if text.is_empty() {
+        None
+    } else {
+        let rustc_token = rustc_lexer::first_token(text);
+        Some(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from(0)))
+    }
+}
+
+/// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
+/// It describes all the types of errors that may happen during the tokenization
+/// of Rust source.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub enum TokenizeError {
     /// Base prefix was provided, but there were no digits
@@ -124,94 +127,95 @@ pub enum TokenizeError {
     /// Raw byte string literal lacks trailing delimiter e.g. `"##`
     UnterminatedRawByteString,
 
-    /// Raw string lacks a quote after pound characters e.g. `r###`
+    /// Raw string lacks a quote after the pound characters e.g. `r###`
     UnstartedRawString,
-    /// Raw byte string lacks a quote after pound characters e.g. `br###`
+    /// Raw byte string lacks a quote after the pound characters e.g. `br###`
     UnstartedRawByteString,
 
     /// Lifetime starts with a number e.g. `'4ever`
     LifetimeStartsWithNumber,
 }
 
-fn rustc_token_kind_to_parsed_token(
-    rustc_token_kind: &rustc_lexer::TokenKind,
-    token_text: &str,
+/// Mapper function that converts `rustc_lexer::Token` with some additional context
+/// to `ParsedToken`
+fn rustc_token_to_parsed_token(
+    rustc_token: &rustc_lexer::Token,
+    text: &str,
+    token_start_offset: TextUnit,
 ) -> ParsedToken {
-    use rustc_lexer::TokenKind as TK;
-    use TokenizeError as TE;
-
     // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
     // being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
-    // would mean hell of a rewrite.
+    // would mean hell of a rewrite
 
-    let (syntax_kind, error) = match *rustc_token_kind {
-        TK::LineComment => ok(COMMENT),
-        TK::BlockComment { terminated } => ok_if(terminated, COMMENT, TE::UnterminatedBlockComment),
-        TK::Whitespace => ok(WHITESPACE),
-        TK::Ident => ok(if token_text == "_" {
-            UNDERSCORE
-        } else {
-            SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
-        }),
-        TK::RawIdent => ok(IDENT),
-        TK::Literal { kind, .. } => match_literal_kind(&kind),
-        TK::Lifetime { starts_with_number } => {
-            ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+    let token_range =
+        TextRange::offset_len(token_start_offset, TextUnit::from_usize(rustc_token.len));
+
+    let token_text = &text[token_range];
+
+    let (syntax_kind, error) = {
+        use rustc_lexer::TokenKind as TK;
+        use TokenizeError as TE;
+
+        match rustc_token.kind {
+            TK::LineComment => ok(COMMENT),
+            TK::BlockComment { terminated } => {
+                ok_if(terminated, COMMENT, TE::UnterminatedBlockComment)
+            }
+            TK::Whitespace => ok(WHITESPACE),
+            TK::Ident => ok(if token_text == "_" {
+                UNDERSCORE
+            } else {
+                SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
+            }),
+            TK::RawIdent => ok(IDENT),
+            TK::Literal { kind, .. } => match_literal_kind(&kind),
+            TK::Lifetime { starts_with_number } => {
+                ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+            }
+            TK::Semi => ok(SEMI),
+            TK::Comma => ok(COMMA),
+            TK::Dot => ok(DOT),
+            TK::OpenParen => ok(L_PAREN),
+            TK::CloseParen => ok(R_PAREN),
+            TK::OpenBrace => ok(L_CURLY),
+            TK::CloseBrace => ok(R_CURLY),
+            TK::OpenBracket => ok(L_BRACK),
+            TK::CloseBracket => ok(R_BRACK),
+            TK::At => ok(AT),
+            TK::Pound => ok(POUND),
+            TK::Tilde => ok(TILDE),
+            TK::Question => ok(QUESTION),
+            TK::Colon => ok(COLON),
+            TK::Dollar => ok(DOLLAR),
+            TK::Eq => ok(EQ),
+            TK::Not => ok(EXCL),
+            TK::Lt => ok(L_ANGLE),
+            TK::Gt => ok(R_ANGLE),
+            TK::Minus => ok(MINUS),
+            TK::And => ok(AMP),
+            TK::Or => ok(PIPE),
+            TK::Plus => ok(PLUS),
+            TK::Star => ok(STAR),
+            TK::Slash => ok(SLASH),
+            TK::Caret => ok(CARET),
+            TK::Percent => ok(PERCENT),
+            TK::Unknown => ok(ERROR),
         }
-        TK::Semi => ok(SEMI),
-        TK::Comma => ok(COMMA),
-        TK::Dot => ok(DOT),
-        TK::OpenParen => ok(L_PAREN),
-        TK::CloseParen => ok(R_PAREN),
-        TK::OpenBrace => ok(L_CURLY),
-        TK::CloseBrace => ok(R_CURLY),
-        TK::OpenBracket => ok(L_BRACK),
-        TK::CloseBracket => ok(R_BRACK),
-        TK::At => ok(AT),
-        TK::Pound => ok(POUND),
-        TK::Tilde => ok(TILDE),
-        TK::Question => ok(QUESTION),
-        TK::Colon => ok(COLON),
-        TK::Dollar => ok(DOLLAR),
-        TK::Eq => ok(EQ),
-        TK::Not => ok(EXCL),
-        TK::Lt => ok(L_ANGLE),
-        TK::Gt => ok(R_ANGLE),
-        TK::Minus => ok(MINUS),
-        TK::And => ok(AMP),
-        TK::Or => ok(PIPE),
-        TK::Plus => ok(PLUS),
-        TK::Star => ok(STAR),
-        TK::Slash => ok(SLASH),
-        TK::Caret => ok(CARET),
-        TK::Percent => ok(PERCENT),
-        TK::Unknown => ok(ERROR),
     };
 
-    return ParsedToken::new(
-        Token::new(syntax_kind, TextUnit::from_usize(token_text.len())),
-        error,
-    );
+    return ParsedToken {
+        token: Token { kind: syntax_kind, len: token_range.len() },
+        error: error
+            .map(|error| SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)),
+    };
 
     type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
 
-    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
-        (syntax_kind, None)
-    }
-    const fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        if cond {
-            ok(syntax_kind)
-        } else {
-            err(syntax_kind, error)
-        }
-    }
-    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        (syntax_kind, Some(error))
-    }
-
-    const fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
+    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
         use rustc_lexer::LiteralKind as LK;
+        use TokenizeError as TE;
+
         match *kind {
             LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
             LK::Float { empty_exponent, .. } => {
@@ -237,27 +241,17 @@ fn rustc_token_kind_to_parsed_token(
            }
         }
     }
-}
-
-pub fn first_token(text: &str) -> Option<ParsedToken> {
-    // Checking for emptiness because of `rustc_lexer::first_token()` invariant (see its body)
-    if text.is_empty() {
-        None
-    } else {
-        let rustc_token = rustc_lexer::first_token(text);
-        Some(rustc_token_kind_to_parsed_token(&rustc_token.kind, &text[..rustc_token.len]))
+    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
+        (syntax_kind, None)
     }
-}
-
-// TODO: think what to do with this ad hoc function
-pub fn classify_literal(text: &str) -> Option<ParsedToken> {
-    let t = rustc_lexer::first_token(text);
-    if t.len != text.len() {
-        return None;
+    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        (syntax_kind, Some(error))
+    }
+    fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
+        if cond {
+            ok(syntax_kind)
+        } else {
+            err(syntax_kind, error)
+        }
     }
-    let kind = match t.kind {
-        rustc_lexer::TokenKind::Literal { kind, .. } => match_literal_kind(kind),
-        _ => return None,
-    };
-    Some(ParsedToken::new(Token::new(kind, TextUnit::from_usize(t.len)), None))
 }
-- cgit v1.2.3
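
With the vector-based rewrite the same flow becomes eager (again a hypothetical caller sketch; the module path is assumed as before):

    use ra_syntax::parsing::lexer::{tokenize, tokenize_append, ParsedTokens};

    fn lex_demo() {
        // One shot: tokens and their errors come back together.
        let parsed: ParsedTokens = tokenize("#!/usr/bin/env rust\nfn main() {}");
        // The shebang is lexed first and occupies the first token slot.
        assert_eq!(parsed.tokens[0].kind, ra_syntax::SyntaxKind::SHEBANG);

        // `tokenize_append` exists so a caller can reuse one accumulator
        // across several inputs instead of allocating fresh vectors each time.
        let mut acc = ParsedTokens::default();
        for chunk in &["fn main() {}", "0x"] {
            tokenize_append(chunk, &mut acc);
        }
        assert!(!acc.errors.is_empty()); // "0x" contributes an EmptyInt error
    }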

From a2bc4c2a7453f2e72df6f2e6c3273d6b3f0114a9 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Sun, 26 Jan 2020 22:32:23 +0200
Subject: ra_syntax: fixed doc comment

---
 crates/ra_syntax/src/parsing/lexer.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 6d96f8400..9f321cd06 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -53,7 +53,7 @@ pub fn tokenize(text: &str) -> ParsedTokens {
 }
 
 /// Break a string up into its component tokens.
-/// Returns `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
+/// Writes to `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
 /// Beware that it checks for shebang first and its length contributes to resulting
 /// tokens offsets.
 pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
-- cgit v1.2.3

From ffe00631d5823070d6ab9e92ae7cfb5dcb04200d Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Sun, 26 Jan 2020 22:33:09 +0200
Subject: ra_syntax: moved ParsedToken derive attribute under the doc comment

---
 crates/ra_syntax/src/parsing/lexer.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 9f321cd06..7cd7110de 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -16,8 +16,8 @@ pub struct Token {
     pub len: TextUnit,
 }
 
-#[derive(Debug)]
 /// Represents the result of parsing one token. Beware that the token may be malformed.
+#[derive(Debug)]
 pub struct ParsedToken {
     /// Parsed token.
     pub token: Token,
-- cgit v1.2.3

From c6d0881382548da9e6f8a8362306e7c3948b84b8 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Mon, 27 Jan 2020 01:38:16 +0200
Subject: add better docs for tokenize errors

---
 crates/ra_syntax/src/parsing/lexer.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 7cd7110de..bf6b4d637 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -107,9 +107,9 @@ pub fn first_token(text: &str) -> Option<ParsedToken> {
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub enum TokenizeError {
     /// Base prefix was provided, but there were no digits
-    /// after it, e.g. `0x`.
+    /// after it, e.g. `0x`, `0b`.
     EmptyInt,
-    /// Float exponent lacks digits e.g. `e+`, `E+`, `e-`, `E-`,
+    /// Float exponent lacks digits e.g. `12.34e+`, `12.3E+`, `12e-`, `1_E-`,
     EmptyExponent,
 
     /// Block comment lacks trailing delimiter `*/`
-- cgit v1.2.3
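
Concrete inputs that trigger the two variants documented above (a sketch; `single_token` is the helper from earlier in this series, and the error values named in the comments are what `match_literal_kind` produces for these strings):

    use ra_syntax::parsing::lexer::single_token;

    // `0x` and `0b` lex as a single INT_NUMBER whose error slot is filled
    // with `TokenizeError::EmptyInt`: the base prefix has no digits after it.
    assert!(single_token("0x").map_or(false, |p| p.error.is_some()));

    // `12.34e+` lexes as a single FLOAT_NUMBER carrying
    // `TokenizeError::EmptyExponent`: `e+` opens an exponent with no digits.
    assert!(single_token("12.34e+").map_or(false, |p| p.error.is_some()));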

From 9e7eaa959f9dc368a55f1a80b35651b78b3d0883 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Tue, 28 Jan 2020 07:09:13 +0200
Subject: ra_syntax: refactored the lexer design as per @matklad and @kiljacken
 PR review

---
 crates/ra_syntax/src/parsing/lexer.rs | 313 ++++++++++++++++++----------------
 1 file changed, 165 insertions(+), 148 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index bf6b4d637..55755be18 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -16,55 +16,21 @@ pub struct Token {
     pub len: TextUnit,
 }
 
-/// Represents the result of parsing one token. Beware that the token may be malformed.
-#[derive(Debug)]
-pub struct ParsedToken {
-    /// Parsed token.
-    pub token: Token,
-    /// If error is present then parsed token is malformed.
-    pub error: Option<SyntaxError>,
-}
-
-#[derive(Debug, Default)]
-/// Represents the result of parsing source code of Rust language.
-pub struct ParsedTokens {
-    /// Parsed tokens in order they appear in source code.
-    pub tokens: Vec<Token>,
-    /// Collection of all occurred tokenization errors.
-    /// In general `self.errors.len() <= self.tokens.len()`
-    pub errors: Vec<SyntaxError>,
-}
-impl ParsedTokens {
-    /// Append `token` and `error` (if present) to the result.
-    pub fn push(&mut self, ParsedToken { token, error }: ParsedToken) {
-        self.tokens.push(token);
-        if let Some(error) = error {
-            self.errors.push(error)
-        }
-    }
-}
-
-/// Same as `tokenize_append()`, just a shortcut for creating `ParsedTokens`
-/// and returning the result the usual way.
-pub fn tokenize(text: &str) -> ParsedTokens {
-    let mut parsed = ParsedTokens::default();
-    tokenize_append(text, &mut parsed);
-    parsed
-}
-
 /// Break a string up into its component tokens.
-/// Writes to `ParsedTokens` which are basically a pair `(Vec<Token>, Vec<SyntaxError>)`.
 /// Beware that it checks for shebang first and its length contributes to resulting
 /// tokens offsets.
-pub fn tokenize_append(text: &str, parsed: &mut ParsedTokens) {
+pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     // non-empty string is a precondition of `rustc_lexer::strip_shebang()`.
     if text.is_empty() {
-        return;
+        return Default::default();
     }
 
+    let mut tokens = Vec::new();
+    let mut errors = Vec::new();
+
     let mut offset: usize = rustc_lexer::strip_shebang(text)
         .map(|shebang_len| {
-            parsed.tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
             shebang_len
         })
         .unwrap_or(0);
@@ -72,35 +38,76 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let text_without_shebang = &text[offset..];
 
     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        parsed.push(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from_usize(offset)));
+        let token_len = TextUnit::from_usize(rustc_token.len);
+        let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
+
+        let (syntax_kind, error) =
+            rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
+
+        tokens.push(Token { kind: syntax_kind, len: token_len });
+
+        if let Some(error) = error {
+            errors.push(SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range));
+        }
+
         offset += rustc_token.len;
     }
+
+    (tokens, errors)
 }
 
-/// Returns the first encountered token at the beginning of the string.
-/// If the string contains zero or *two or more tokens* returns `None`.
+/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
+/// encountered at the beginning of the string.
+///
+/// Returns `None` if the string contains zero *or two or more* tokens.
+/// The token is malformed if the returned error is not `None`.
+///
+/// Beware that unescape errors are not checked at tokenization time.
+pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
+    first_token(text)
+        .filter(|(token, _)| token.len.to_usize() == text.len())
+        .map(|(token, error)| (token.kind, error))
+}
+
+/// The same as `single_syntax_kind()` but returns only `SyntaxKind` and
+/// returns `None` if any tokenization error occurred.
 ///
-/// The main difference between `first_token()` and `single_token()` is that
-/// the latter returns `None` if the string contains more than one token.
-pub fn single_token(text: &str) -> Option<ParsedToken> {
-    first_token(text).filter(|parsed| parsed.token.len.to_usize() == text.len())
+/// Beware that unescape errors are not checked at tokenization time.
+pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
+    first_token(text)
+        .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+        .map(|(token, _error)| token.kind)
 }
 
 /// Returns the first encountered token at the beginning of the string.
-/// If the string contains zero tokens returns `None`.
 ///
-/// The main difference between `first_token()` and `single_token()` is that
-/// the latter returns `None` if the string contains more than one token.
-pub fn first_token(text: &str) -> Option<ParsedToken> {
+/// Returns `None` if the string contains zero tokens or if the token was parsed
+/// with an error.
+///
+/// Beware that unescape errors are not checked at tokenization time.
+fn first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     // non-empty string is a precondition of `rustc_lexer::first_token()`.
     if text.is_empty() {
-        None
-    } else {
-        let rustc_token = rustc_lexer::first_token(text);
-        Some(rustc_token_to_parsed_token(&rustc_token, text, TextUnit::from(0)))
+        return None;
     }
+
+    let rustc_token = rustc_lexer::first_token(text);
+    let (syntax_kind, error) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
+
+    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
+    let error = error.map(|error| {
+        SyntaxError::new(
+            SyntaxErrorKind::TokenizeError(error),
+            TextRange::from_to(TextUnit::from(0), TextUnit::of_str(text)),
+        )
+    });
+
+    Some((token, error))
 }
 
+// FIXME: simplify TokenizeError to `SyntaxError(String, TextRange)` as per @matklad advice:
+// https://github.com/rust-analyzer/rust-analyzer/pull/2911/files#r371175067
+
 /// Describes the values of `SyntaxErrorKind::TokenizeError` enum variant.
 /// It describes all the types of errors that may happen during the tokenization
@@ -136,122 +143,132 @@ pub enum TokenizeError {
     LifetimeStartsWithNumber,
 }
 
-/// Mapper function that converts `rustc_lexer::Token` with some additional context
-/// to `ParsedToken`
-fn rustc_token_to_parsed_token(
-    rustc_token: &rustc_lexer::Token,
-    text: &str,
-    token_start_offset: TextUnit,
-) -> ParsedToken {
+fn rustc_token_kind_to_syntax_kind(
+    rustc_token_kind: &rustc_lexer::TokenKind,
+    token_text: &str,
+) -> (SyntaxKind, Option<TokenizeError>) {
+    // A note on an intended tradeoff:
     // We drop some useful information here (see patterns with double dots `..`)
     // Storing that info in `SyntaxKind` is not possible due to its layout requirements of
-    // being `u16` that come from `rowan::SyntaxKind` type and changes to `rowan::SyntaxKind`
-    // would mean hell of a rewrite
+    // being `u16` that come from `rowan::SyntaxKind`.
 
-    let token_range =
-        TextRange::offset_len(token_start_offset, TextUnit::from_usize(rustc_token.len));
-
-    let token_text = &text[token_range];
-
-    let (syntax_kind, error) = {
+    let syntax_kind = {
         use rustc_lexer::TokenKind as TK;
         use TokenizeError as TE;
 
-        match rustc_token.kind {
-            TK::LineComment => ok(COMMENT),
-            TK::BlockComment { terminated } => {
-                ok_if(terminated, COMMENT, TE::UnterminatedBlockComment)
+        match rustc_token_kind {
+            TK::LineComment => COMMENT,
+
+            TK::BlockComment { terminated: true } => COMMENT,
+            TK::BlockComment { terminated: false } => {
+                return (COMMENT, Some(TE::UnterminatedBlockComment));
             }
-            TK::Whitespace => ok(WHITESPACE),
-            TK::Ident => ok(if token_text == "_" {
-                UNDERSCORE
-            } else {
-                SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
-            }),
-            TK::RawIdent => ok(IDENT),
-            TK::Literal { kind, .. } => match_literal_kind(&kind),
-            TK::Lifetime { starts_with_number } => {
-                ok_if(!starts_with_number, LIFETIME, TE::LifetimeStartsWithNumber)
+
+            TK::Whitespace => WHITESPACE,
+
+            TK::Ident => {
+                if token_text == "_" {
+                    UNDERSCORE
+                } else {
+                    SyntaxKind::from_keyword(token_text).unwrap_or(IDENT)
+                }
             }
-            TK::Semi => ok(SEMI),
-            TK::Comma => ok(COMMA),
-            TK::Dot => ok(DOT),
-            TK::OpenParen => ok(L_PAREN),
-            TK::CloseParen => ok(R_PAREN),
-            TK::OpenBrace => ok(L_CURLY),
-            TK::CloseBrace => ok(R_CURLY),
-            TK::OpenBracket => ok(L_BRACK),
-            TK::CloseBracket => ok(R_BRACK),
-            TK::At => ok(AT),
-            TK::Pound => ok(POUND),
-            TK::Tilde => ok(TILDE),
-            TK::Question => ok(QUESTION),
-            TK::Colon => ok(COLON),
-            TK::Dollar => ok(DOLLAR),
-            TK::Eq => ok(EQ),
-            TK::Not => ok(EXCL),
-            TK::Lt => ok(L_ANGLE),
-            TK::Gt => ok(R_ANGLE),
-            TK::Minus => ok(MINUS),
-            TK::And => ok(AMP),
-            TK::Or => ok(PIPE),
-            TK::Plus => ok(PLUS),
-            TK::Star => ok(STAR),
-            TK::Slash => ok(SLASH),
-            TK::Caret => ok(CARET),
-            TK::Percent => ok(PERCENT),
-            TK::Unknown => ok(ERROR),
-        }
-    };
 
-    return ParsedToken {
-        token: Token { kind: syntax_kind, len: token_range.len() },
-        error: error
-            .map(|error| SyntaxError::new(SyntaxErrorKind::TokenizeError(error), token_range)),
-    };
+            TK::RawIdent => IDENT,
+            TK::Literal { kind, .. } => return match_literal_kind(&kind),
+
+            TK::Lifetime { starts_with_number: false } => LIFETIME,
+            TK::Lifetime { starts_with_number: true } => {
+                return (LIFETIME, Some(TE::LifetimeStartsWithNumber))
+            }
+
+            TK::Semi => SEMI,
+            TK::Comma => COMMA,
+            TK::Dot => DOT,
+            TK::OpenParen => L_PAREN,
+            TK::CloseParen => R_PAREN,
+            TK::OpenBrace => L_CURLY,
+            TK::CloseBrace => R_CURLY,
+            TK::OpenBracket => L_BRACK,
+            TK::CloseBracket => R_BRACK,
+            TK::At => AT,
+            TK::Pound => POUND,
+            TK::Tilde => TILDE,
+            TK::Question => QUESTION,
+            TK::Colon => COLON,
+            TK::Dollar => DOLLAR,
+            TK::Eq => EQ,
+            TK::Not => EXCL,
+            TK::Lt => L_ANGLE,
+            TK::Gt => R_ANGLE,
+            TK::Minus => MINUS,
+            TK::And => AMP,
+            TK::Or => PIPE,
+            TK::Plus => PLUS,
+            TK::Star => STAR,
+            TK::Slash => SLASH,
+            TK::Caret => CARET,
+            TK::Percent => PERCENT,
+            TK::Unknown => ERROR,
+        }
+    };
 
-    type ParsedSyntaxKind = (SyntaxKind, Option<TokenizeError>);
+    return (syntax_kind, None);
 
-    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> ParsedSyntaxKind {
+    fn match_literal_kind(kind: &rustc_lexer::LiteralKind) -> (SyntaxKind, Option<TokenizeError>) {
         use rustc_lexer::LiteralKind as LK;
         use TokenizeError as TE;
 
-        match *kind {
-            LK::Int { empty_int, .. } => ok_if(!empty_int, INT_NUMBER, TE::EmptyInt),
-            LK::Float { empty_exponent, .. } => {
-                ok_if(!empty_exponent, FLOAT_NUMBER, TE::EmptyExponent)
+        #[rustfmt::skip]
+        let syntax_kind = match *kind {
+            LK::Int { empty_int: false, .. } => INT_NUMBER,
+            LK::Int { empty_int: true, .. } => {
+                return (INT_NUMBER, Some(TE::EmptyInt))
+            }
+
+            LK::Float { empty_exponent: false, .. } => FLOAT_NUMBER,
+            LK::Float { empty_exponent: true, .. } => {
+                return (FLOAT_NUMBER, Some(TE::EmptyExponent))
+            }
+
+            LK::Char { terminated: true } => CHAR,
+            LK::Char { terminated: false } => {
+                return (CHAR, Some(TE::UnterminatedChar))
+            }
+
+            LK::Byte { terminated: true } => BYTE,
+            LK::Byte { terminated: false } => {
+                return (BYTE, Some(TE::UnterminatedByte))
             }
-            LK::Char { terminated } => ok_if(terminated, CHAR, TE::UnterminatedChar),
-            LK::Byte { terminated } => ok_if(terminated, BYTE, TE::UnterminatedByte),
-            LK::Str { terminated } => ok_if(terminated, STRING, TE::UnterminatedString),
-            LK::ByteStr { terminated } => {
-                ok_if(terminated, BYTE_STRING, TE::UnterminatedByteString)
+
+            LK::Str { terminated: true } => STRING,
+            LK::Str { terminated: false } => {
+                return (STRING, Some(TE::UnterminatedString))
+            }
+
+            LK::ByteStr { terminated: true } => BYTE_STRING,
+            LK::ByteStr { terminated: false } => {
+                return (BYTE_STRING, Some(TE::UnterminatedByteString))
             }
 
-            LK::RawStr { started: true, terminated, .. } => {
-                ok_if(terminated, RAW_STRING, TE::UnterminatedRawString)
+            LK::RawStr { started: true, terminated: true, .. } => RAW_STRING,
+            LK::RawStr { started: true, terminated: false, .. } => {
+                return (RAW_STRING, Some(TE::UnterminatedRawString))
+            }
+            LK::RawStr { started: false, .. } => {
+                return (RAW_STRING, Some(TE::UnstartedRawString))
             }
-            LK::RawStr { started: false, .. } => err(RAW_STRING, TE::UnstartedRawString),
 
-            LK::RawByteStr { started: true, terminated, .. } => {
-                ok_if(terminated, RAW_BYTE_STRING, TE::UnterminatedRawByteString)
+            LK::RawByteStr { started: true, terminated: true, .. } => RAW_BYTE_STRING,
+            LK::RawByteStr { started: true, terminated: false, .. } => {
+                return (RAW_BYTE_STRING, Some(TE::UnterminatedRawByteString))
             }
             LK::RawByteStr { started: false, .. } => {
-                err(RAW_BYTE_STRING, TE::UnstartedRawByteString)
+                return (RAW_BYTE_STRING, Some(TE::UnstartedRawByteString))
             }
-        }
-    }
-    const fn ok(syntax_kind: SyntaxKind) -> ParsedSyntaxKind {
-        (syntax_kind, None)
-    }
-    const fn err(syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        (syntax_kind, Some(error))
-    }
-    fn ok_if(cond: bool, syntax_kind: SyntaxKind, error: TokenizeError) -> ParsedSyntaxKind {
-        if cond {
-            ok(syntax_kind)
-        } else {
-            err(syntax_kind, error)
-        }
-    }
+        };
+
+        (syntax_kind, None)
+    }
 }
-- cgit v1.2.3
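
A sketch of the refactored surface from a consumer's point of view (hypothetical caller; module path and reachability are assumptions):

    use ra_syntax::{parsing::lexer, SyntaxKind};

    let (tokens, errors) = lexer::tokenize("fn main() { let x = 92; }");
    // Tokens carry only lengths; consumers reconstruct offsets by keeping
    // a running sum, exactly as `tokenize` itself does internally.
    assert!(errors.is_empty());
    assert_eq!(tokens.first().map(|t| t.kind), Some(SyntaxKind::FN_KW));

    // The single-token helpers classify small fragments:
    assert_eq!(lexer::lex_single_valid_syntax_kind("92"), Some(SyntaxKind::INT_NUMBER));
    assert_eq!(lexer::lex_single_valid_syntax_kind("0x"), None); // EmptyInt is rejected
    let (kind, error) = lexer::lex_single_syntax_kind("0x").unwrap();
    assert_eq!(kind, SyntaxKind::INT_NUMBER);
    assert!(error.is_some());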

From b1764d85fced5f3bc1db82063fca9369f9e1740b Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Tue, 28 Jan 2020 07:13:18 +0200
Subject: ra_syntax: fixed a typo in doc comment

---
 crates/ra_syntax/src/parsing/lexer.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 55755be18..d1315e604 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -69,7 +69,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
         .map(|(token, error)| (token.kind, error))
 }
 
-/// The same as `single_syntax_kind()` but returns only `SyntaxKind` and
+/// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
 /// returns `None` if any tokenization error occurred.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
@@ -78,7 +78,9 @@ pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
         .map(|(token, _error)| token.kind)
 }
 
-/// Returns the first encountered token at the beginning of the string.
+/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
+/// encountered at the beginning of the string.
 ///
 /// Returns `None` if the string contains zero tokens or if the token was parsed
 /// with an error.
+/// The token is malformed if the returned error is not `None`.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 fn first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
-- cgit v1.2.3

From 58e01d875477234c132061e3072ac19f4dfb7a32 Mon Sep 17 00:00:00 2001
From: Veetaha
Date: Tue, 28 Jan 2020 07:18:35 +0200
Subject: ra_syntax: rename first_token() -> lex_first_token()

---
 crates/ra_syntax/src/parsing/lexer.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index d1315e604..f889e6a1d 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -64,7 +64,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
-    first_token(text)
+    lex_first_token(text)
         .filter(|(token, _)| token.len.to_usize() == text.len())
         .map(|(token, error)| (token.kind, error))
 }
@@ -74,7 +74,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
-    first_token(text)
+    lex_first_token(text)
         .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
         .map(|(token, _error)| token.kind)
 }
@@ -87,7 +87,7 @@ pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
 /// The token is malformed if the returned error is not `None`.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
-fn first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
+fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     // non-empty string is a precondition of `rustc_lexer::first_token()`.
     if text.is_empty() {
         return None;
-- cgit v1.2.3
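
After the rename every public entry point carries the `lex_` prefix while `lex_first_token` stays private; call sites look like this (assumed paths, illustrative values):

    use ra_syntax::{parsing::lexer, SyntaxKind};

    // A lifetime that starts with a digit is still produced as LIFETIME,
    // but the error channel reports `LifetimeStartsWithNumber`.
    let (kind, error) = lexer::lex_single_syntax_kind("'4ever").unwrap();
    assert_eq!(kind, SyntaxKind::LIFETIME);
    assert!(error.is_some());

    // The strict variant filters such malformed tokens out entirely.
    assert_eq!(lexer::lex_single_valid_syntax_kind("'static"), Some(SyntaxKind::LIFETIME));
    assert_eq!(lexer::lex_single_valid_syntax_kind("'4ever"), None);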