From b1d5817dd18b7b5fc102a63b084b1ee7ff4f9996 Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Fri, 24 Apr 2020 23:40:41 +0200
Subject: Convert code to text-size

---
 crates/ra_syntax/src/parsing/lexer.rs             | 18 +++++++++---------
 crates/ra_syntax/src/parsing/reparsing.rs         |  4 ++--
 crates/ra_syntax/src/parsing/text_token_source.rs |  8 ++++----
 crates/ra_syntax/src/parsing/text_tree_sink.rs    | 14 +++++++-------
 4 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 67c1f1b48..1fdc76d98 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -4,7 +4,7 @@
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };

 /// A token of Rust source.
@@ -13,7 +13,7 @@ pub struct Token {
     /// The kind of token.
     pub kind: SyntaxKind,
     /// The length of the token.
-    pub len: TextUnit,
+    pub len: TextSize,
 }

 /// Break a string up into its component tokens.
@@ -30,7 +30,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let mut offset: usize = rustc_lexer::strip_shebang(text)
         .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
             shebang_len
         })
         .unwrap_or(0);
@@ -38,8 +38,8 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let text_without_shebang = &text[offset..];

     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextUnit::from_usize(rustc_token.len);
-        let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
+        let token_len = TextSize::from_usize(rustc_token.len);
+        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);

         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len == TextUnit::of_str(text))
+        .filter(|(token, _)| token.len == TextSize::of(text))
         .map(|(token, error)| (token.kind, error))
 }

@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
+        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
         .map(|(token, _error)| token.kind)
 }

@@ -96,9 +96,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);

-    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
+    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
     let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text)))
+        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
     });

     Some((token, optional_error))
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs
index 2d65b91f1..ffff0a7b2 100644
--- a/crates/ra_syntax/src/parsing/reparsing.rs
+++ b/crates/ra_syntax/src/parsing/reparsing.rs
@@ -19,7 +19,7 @@ use crate::{
     syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode},
     SyntaxError,
     SyntaxKind::*,
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };

 pub(crate) fn incremental_reparse(
@@ -176,7 +176,7 @@ fn merge_errors(
         if old_err_range.end() <= range_before_reparse.start() {
             res.push(old_err);
         } else if old_err_range.start() >= range_before_reparse.end() {
-            let inserted_len = TextUnit::of_str(&edit.insert);
+            let inserted_len = TextSize::of(&edit.insert);
             res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len()));
             // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug)
         }
diff --git a/crates/ra_syntax/src/parsing/text_token_source.rs b/crates/ra_syntax/src/parsing/text_token_source.rs
index e2433913c..7ddc2c2c3 100644
--- a/crates/ra_syntax/src/parsing/text_token_source.rs
+++ b/crates/ra_syntax/src/parsing/text_token_source.rs
@@ -3,7 +3,7 @@
 use ra_parser::Token as PToken;
 use ra_parser::TokenSource;

-use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextUnit};
+use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize};

 pub(crate) struct TextTokenSource<'t> {
     text: &'t str,
@@ -15,7 +15,7 @@ pub(crate) struct TextTokenSource<'t> {
     ///  0      7  10
     /// ```
     /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
-    start_offsets: Vec<TextUnit>,
+    start_offsets: Vec<TextSize>,
     /// non-whitespace/comment tokens
     /// ```non-rust
     /// struct Foo {}
@@ -51,12 +51,12 @@ impl<'t> TokenSource for TextTokenSource<'t> {
         if pos >= self.tokens.len() {
             return false;
         }
-        let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
+        let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len);
         self.text[range] == *kw
     }
 }

-fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken {
+fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken {
     let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
     let is_jointed_to_next = if pos + 1 < start_offsets.len() {
         start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs
index 87bb21cd9..22aed1db1 100644
--- a/crates/ra_syntax/src/parsing/text_tree_sink.rs
+++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs
@@ -9,7 +9,7 @@ use crate::{
     syntax_node::GreenNode,
     SmolStr, SyntaxError,
     SyntaxKind::{self, *},
-    SyntaxTreeBuilder, TextRange, TextUnit,
+    SyntaxTreeBuilder, TextRange, TextSize,
 };

 /// Bridges the parser with our specific syntax tree representation.
@@ -18,7 +18,7 @@ use crate::{
 pub(crate) struct TextTreeSink<'a> {
     text: &'a str,
     tokens: &'a [Token],
-    text_pos: TextUnit,
+    text_pos: TextSize,
     token_pos: usize,
     state: State,
     inner: SyntaxTreeBuilder,
@@ -42,7 +42,7 @@ impl<'a> TreeSink for TextTreeSink<'a> {
         let len = self.tokens[self.token_pos..self.token_pos + n_tokens]
             .iter()
             .map(|it| it.len)
-            .sum::<TextUnit>();
+            .sum::<TextSize>();
         self.do_token(kind, len, n_tokens);
     }

@@ -62,12 +62,12 @@ impl<'a> TreeSink for TextTreeSink<'a> {
             self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count();
         let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias];
         let mut trivia_end =
-            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextUnit>();
+            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>();

         let n_attached_trivias = {
             let leading_trivias = leading_trivias.iter().rev().map(|it| {
                 let next_end = trivia_end - it.len;
-                let range = TextRange::from_to(next_end, trivia_end);
+                let range = TextRange::new(next_end, trivia_end);
                 trivia_end = next_end;
                 (it.kind, &self.text[range])
             });
@@ -132,8 +132,8 @@ impl<'a> TextTreeSink<'a> {
         }
     }

-    fn do_token(&mut self, kind: SyntaxKind, len: TextUnit, n_tokens: usize) {
-        let range = TextRange::offset_len(self.text_pos, len);
+    fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) {
+        let range = TextRange::at(self.text_pos, len);
         let text: SmolStr = self.text[range].into();
         self.text_pos += len;
         self.token_pos += n_tokens;
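
A note on the API mapping, for readers of this patch: the conversion is mechanical. `TextUnit` becomes `TextSize`, `TextUnit::of_str` becomes `TextSize::of`, `TextRange::offset_len(offset, len)` becomes `TextRange::at(offset, len)`, and `TextRange::from_to(start, end)` becomes `TextRange::new(start, end)`. The sketch below exercises only calls that appear in the diff; it assumes a dependency on the `text-size` crate and is illustrative, not part of the commit:

    use text_size::{TextRange, TextSize};

    fn main() {
        let text = "struct Foo;";

        // TextSize::of replaces TextUnit::of_str: the UTF-8 length of a &str.
        let len = TextSize::of(text);

        // TextRange::at(offset, len) replaces TextRange::offset_len(offset, len),
        // anchoring a range of the given length at an offset...
        let by_len = TextRange::at(0.into(), len);

        // ...while TextRange::new(start, end) replaces TextRange::from_to(start, end),
        // building the same range from its two endpoints.
        let by_ends = TextRange::new(0.into(), len);
        assert_eq!(by_len, by_ends);

        // Ranges still index string slices directly, as in `&text[token_range]` above.
        assert_eq!(&text[by_len], text);

        // Token lengths still sum to a size, as in `.sum::<TextSize>()` above.
        let total: TextSize = ["struct", " ", "Foo", ";"].iter().map(|p| TextSize::of(*p)).sum();
        assert_eq!(total, len);
    }

Both `TextUnit` and `TextSize` are newtypes over a `u32` of UTF-8 bytes, so the range arithmetic in `merge_errors` (old range plus inserted length, minus deleted length) carries over unchanged.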