author    Aleksey Kladov <[email protected]>  2020-10-07 10:49:31 +0100
committer Aleksey Kladov <[email protected]>  2020-10-07 10:55:20 +0100
commit    fd8622e1ec6371b7cef6fef50f312bc767b317df (patch)
tree      bdc3ea8635a014692a38aae8d70542c782766c63 /crates
parent    faddea935332de3156a5462baa07136bf2e98bf9 (diff)
Cleanup
Diffstat (limited to 'crates')
-rw-r--r--  crates/mbe/src/subtree_source.rs    22
-rw-r--r--  crates/syntax/src/parsing/lexer.rs  23
2 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/crates/mbe/src/subtree_source.rs b/crates/mbe/src/subtree_source.rs
index 226dc3bec..396ce8b16 100644
--- a/crates/mbe/src/subtree_source.rs
+++ b/crates/mbe/src/subtree_source.rs
@@ -2,7 +2,7 @@
 
 use parser::{Token, TokenSource};
 use std::cell::{Cell, Ref, RefCell};
-use syntax::{tokenize, SmolStr, SyntaxKind, SyntaxKind::*, T};
+use syntax::{lex_single_syntax_kind, SmolStr, SyntaxKind, SyntaxKind::*, T};
 use tt::buffer::{Cursor, TokenBuffer};
 
 #[derive(Debug, Clone, Eq, PartialEq)]
@@ -155,17 +155,15 @@ fn convert_delim(d: Option<tt::DelimiterKind>, closing: bool) -> TtToken {
 }
 
 fn convert_literal(l: &tt::Literal) -> TtToken {
-    let mut kinds = tokenize(&l.text).0.into_iter().map(|token| token.kind);
-
-    let kind = match kinds.next() {
-        Some(kind) if kind.is_literal() => Some(kind),
-        Some(SyntaxKind::MINUS) => match kinds.next() {
-            Some(kind) if kind.is_literal() => Some(kind),
-            _ => None,
-        },
-        _ => None,
-    }
-    .unwrap_or_else(|| panic!("Fail to convert given literal {:#?}", &l));
+    let is_negated = l.text.starts_with('-');
+    let inner_text = &l.text[if is_negated { 1 } else { 0 }..];
+
+    let kind = lex_single_syntax_kind(inner_text)
+        .map(|(kind, _error)| kind)
+        .filter(|kind| {
+            kind.is_literal() && (!is_negated || matches!(kind, FLOAT_NUMBER | INT_NUMBER))
+        })
+        .unwrap_or_else(|| panic!("Fail to convert given literal {:#?}", &l));
 
     TtToken { kind, is_joint_to_next: false, text: l.text.clone() }
 }
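
Note on the hunk above: the old `convert_literal` tokenized the full literal text (sign included) and pattern-matched a possible leading MINUS token; the new version strips the sign first, lexes the remainder as a single token, and accepts a negated literal only when it is numeric. Below is a self-contained sketch of that filtering logic, with `Kind` and `lex_kind` as hypothetical stand-ins for rust-analyzer's `SyntaxKind` and `lex_single_syntax_kind`:

    // Hypothetical stand-ins for SyntaxKind / lex_single_syntax_kind.
    #[derive(Debug, PartialEq, Clone, Copy)]
    enum Kind { IntNumber, FloatNumber, Str }

    fn lex_kind(text: &str) -> Option<Kind> {
        // Crude classifier, just enough for the sketch.
        let first = text.chars().next()?;
        if first == '"' {
            Some(Kind::Str)
        } else if first.is_ascii_digit() && text.contains('.') {
            Some(Kind::FloatNumber)
        } else if text.chars().all(|c| c.is_ascii_digit()) {
            Some(Kind::IntNumber)
        } else {
            None
        }
    }

    fn literal_kind(text: &str) -> Option<Kind> {
        let is_negated = text.starts_with('-');
        // Lex without the sign: "-42" is lexed as "42".
        let inner = &text[if is_negated { 1 } else { 0 }..];
        // Only numbers may carry a leading `-`.
        lex_kind(inner).filter(|kind| {
            !is_negated || matches!(kind, Kind::IntNumber | Kind::FloatNumber)
        })
    }

    fn main() {
        assert_eq!(literal_kind("-42"), Some(Kind::IntNumber));
        assert_eq!(literal_kind("1.5"), Some(Kind::FloatNumber));
        assert_eq!(literal_kind("\"hi\""), Some(Kind::Str));
        assert_eq!(literal_kind("-\"hi\""), None); // negation only valid on numbers
    }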
diff --git a/crates/syntax/src/parsing/lexer.rs b/crates/syntax/src/parsing/lexer.rs
index f1202113b..7e38c32cc 100644
--- a/crates/syntax/src/parsing/lexer.rs
+++ b/crates/syntax/src/parsing/lexer.rs
@@ -1,10 +1,10 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
 //! It is just a bridge to `rustc_lexer`.
 
-use rustc_lexer::{LiteralKind as LK, RawStrError};
-
 use std::convert::TryInto;
 
+use rustc_lexer::{LiteralKind as LK, RawStrError};
+
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
@@ -61,17 +61,18 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     (tokens, errors)
 }
 
-/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
-/// encountered at the beginning of the string.
+/// Returns `SyntaxKind` and `Option<SyntaxError>` if `text` parses as a single token.
 ///
 /// Returns `None` if the string contains zero *or two or more* tokens.
 /// The token is malformed if the returned error is not `None`.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
-    lex_first_token(text)
-        .filter(|(token, _)| token.len == TextSize::of(text))
-        .map(|(token, error)| (token.kind, error))
+    let (first_token, err) = lex_first_token(text)?;
+    if first_token.len != TextSize::of(text) {
+        return None;
+    }
+    Some((first_token.kind, err))
 }
 
 /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
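
The rewrite of `lex_single_syntax_kind` trades the `Option` combinator chain for `?` and an early return; the contract is unchanged: the first token must span the entire input, and a malformed token is still returned together with its error. A hypothetical unit test (not part of this commit) pinning that contract down, assuming an unterminated string lexes as a single literal token with an attached error:

    #[test]
    fn lex_single_syntax_kind_contract() {
        // Exactly one token must cover the whole input.
        assert!(lex_single_syntax_kind("struct").is_some());
        // Zero tokens, or two or more tokens, yield None.
        assert!(lex_single_syntax_kind("").is_none());
        assert!(lex_single_syntax_kind("1 + 2").is_none());
        // A malformed single token still yields its kind, plus Some(error).
        let (kind, err) = lex_single_syntax_kind("\"unterminated").unwrap();
        assert!(kind.is_literal() && err.is_some());
    }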
@@ -79,9 +80,11 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
-    lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
-        .map(|(token, _error)| token.kind)
+    let (single_token, err) = lex_single_syntax_kind(text)?;
+    if err.is_some() {
+        return None;
+    }
+    Some(single_token)
 }
 
 /// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
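
`lex_single_valid_syntax_kind` now delegates to `lex_single_syntax_kind` instead of repeating the whole-input length check against `lex_first_token`, and the awkward `!error.is_some()` becomes an explicit early return. A hypothetical usage sketch (not from this commit) contrasting the two functions:

    #[test]
    fn valid_variant_rejects_errors() {
        assert_eq!(lex_single_valid_syntax_kind("42"), Some(INT_NUMBER));
        // The plain variant reports a malformed single token as kind + error ...
        assert!(lex_single_syntax_kind("\"unterminated").is_some());
        // ... while the `_valid_` variant maps any lexing error to None.
        assert!(lex_single_valid_syntax_kind("\"unterminated").is_none());
    }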