From b1d5817dd18b7b5fc102a63b084b1ee7ff4f9996 Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Fri, 24 Apr 2020 23:40:41 +0200
Subject: Convert code to text-size

---
 crates/ra_syntax/src/algo.rs                      | 12 ++++-----
 crates/ra_syntax/src/ast/tokens.rs                | 32 ++++++++++-------------
 crates/ra_syntax/src/fuzz.rs                      |  8 +++---
 crates/ra_syntax/src/lib.rs                       |  4 +--
 crates/ra_syntax/src/parsing/lexer.rs             | 18 ++++++-------
 crates/ra_syntax/src/parsing/reparsing.rs         |  4 +--
 crates/ra_syntax/src/parsing/text_token_source.rs |  8 +++---
 crates/ra_syntax/src/parsing/text_tree_sink.rs    | 14 +++++-----
 crates/ra_syntax/src/ptr.rs                       |  2 +-
 crates/ra_syntax/src/syntax_error.rs              |  6 ++---
 crates/ra_syntax/src/syntax_node.rs               |  4 +--
 crates/ra_syntax/src/tests.rs                     |  8 +++---
 crates/ra_syntax/src/validation.rs                |  4 +--
 13 files changed, 59 insertions(+), 65 deletions(-)

(limited to 'crates/ra_syntax/src')

diff --git a/crates/ra_syntax/src/algo.rs b/crates/ra_syntax/src/algo.rs
index 06df8495c..2a8dac757 100644
--- a/crates/ra_syntax/src/algo.rs
+++ b/crates/ra_syntax/src/algo.rs
@@ -11,7 +11,7 @@ use rustc_hash::FxHashMap;
 
 use crate::{
     AstNode, Direction, NodeOrToken, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxNodePtr,
-    SyntaxToken, TextRange, TextUnit,
+    SyntaxToken, TextRange, TextSize,
 };
 
 /// Returns ancestors of the node at the offset, sorted by length. This should
@@ -21,7 +21,7 @@ use crate::{
 /// t.parent().ancestors())`.
 pub fn ancestors_at_offset(
     node: &SyntaxNode,
-    offset: TextUnit,
+    offset: TextSize,
 ) -> impl Iterator<Item = SyntaxNode> {
     node.token_at_offset(offset)
         .map(|token| token.parent().ancestors())
@@ -37,7 +37,7 @@ pub fn ancestors_at_offset(
 /// ```
 ///
 /// then the shorter node will be silently preferred.
-pub fn find_node_at_offset<N: AstNode>(syntax: &SyntaxNode, offset: TextUnit) -> Option<N> {
+pub fn find_node_at_offset<N: AstNode>(syntax: &SyntaxNode, offset: TextSize) -> Option<N> {
     ancestors_at_offset(syntax, offset).find_map(N::cast)
 }
 
@@ -180,7 +180,7 @@ fn _insert_children(
     position: InsertPosition<SyntaxElement>,
     to_insert: &mut dyn Iterator<Item = SyntaxElement>,
 ) -> SyntaxNode {
-    let mut delta = TextUnit::default();
+    let mut delta = TextSize::default();
     let to_insert = to_insert.map(|element| {
         delta += element.text_range().len();
         to_green_element(element)
     });
@@ -347,7 +347,7 @@ fn with_children(
     parent: &SyntaxNode,
     new_children: Vec<NodeOrToken<rowan::GreenNode, rowan::GreenToken>>,
 ) -> SyntaxNode {
-    let len = new_children.iter().map(|it| it.text_len()).sum::<TextUnit>();
+    let len = new_children.iter().map(|it| it.text_len()).sum::<TextSize>();
     let new_node = rowan::GreenNode::new(rowan::SyntaxKind(parent.kind() as u16), new_children);
     let new_root_node = parent.replace_with(new_node);
     let new_root_node = SyntaxNode::new_root(new_root_node);
@@ -355,7 +355,7 @@ fn with_children(
     // FIXME: use a more elegant way to re-fetch the node (#1185), make
     // `range` private afterwards
     let mut ptr = SyntaxNodePtr::new(parent);
-    ptr.range = TextRange::offset_len(ptr.range.start(), len);
+    ptr.range = TextRange::at(ptr.range.start(), len);
     ptr.to_node(&new_root_node)
 }
 
diff --git a/crates/ra_syntax/src/ast/tokens.rs b/crates/ra_syntax/src/ast/tokens.rs
index aa34b682d..26b8f9c36 100644
--- a/crates/ra_syntax/src/ast/tokens.rs
+++ b/crates/ra_syntax/src/ast/tokens.rs
@@ -2,7 +2,7 @@
 
 use crate::{
     ast::{AstToken, Comment, RawString, String, Whitespace},
-    TextRange, TextUnit,
+    TextRange, TextSize,
 };
 
 impl Comment {
@@ -94,14 +94,14 @@ impl QuoteOffsets {
             return None;
         }
 
-        let start = TextUnit::from(0);
-        let left_quote = TextUnit::from_usize(left_quote) + TextUnit::of_char('"');
-        let right_quote = TextUnit::from_usize(right_quote);
-        let end = TextUnit::of_str(literal);
+        let start = TextSize::from(0);
+        let left_quote = TextSize::from_usize(left_quote) + TextSize::of('"');
+        let right_quote = TextSize::from_usize(right_quote);
+        let end = TextSize::of(literal);
 
         let res = QuoteOffsets {
-            quotes: [TextRange::from_to(start, left_quote), TextRange::from_to(right_quote, end)],
-            contents: TextRange::from_to(left_quote, right_quote),
+            quotes: [TextRange::new(start, left_quote), TextRange::new(right_quote, end)],
+            contents: TextRange::new(left_quote, right_quote),
         };
         Some(res)
     }
@@ -168,7 +168,7 @@ impl HasStringValue for RawString {
 impl RawString {
     pub fn map_range_up(&self, range: TextRange) -> Option<TextRange> {
         let contents_range = self.text_range_between_quotes()?;
-        assert!(range.is_subrange(&TextRange::offset_len(0.into(), contents_range.len())));
+        assert!(TextRange::up_to(contents_range.len()).contains_range(range));
         Some(range + contents_range.start())
     }
 }
@@ -459,7 +459,7 @@ pub trait HasFormatSpecifier: AstToken {
                 while let Some((r, Ok(next_char))) = chars.peek() {
                     if next_char.is_ascii_digit() {
                         chars.next();
-                        range = range.extend_to(r);
+                        range = range.cover(*r);
                     } else {
                         break;
                     }
                 }
@@ -477,7 +477,7 @@
                 while let Some((r, Ok(next_char))) = chars.peek() {
                     if *next_char == '_' || next_char.is_ascii_digit() || next_char.is_alphabetic() {
                         chars.next();
-                        range = range.extend_to(r);
+                        range = range.cover(*r);
                     } else {
                         break;
                     }
                 }
@@ -498,10 +498,8 @@ impl HasFormatSpecifier for String {
         let mut res = Vec::with_capacity(text.len());
         rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| {
             res.push((
-                TextRange::from_to(
-                    TextUnit::from_usize(range.start),
-                    TextUnit::from_usize(range.end),
-                ) + offset,
+                TextRange::new(TextSize::from_usize(range.start), TextSize::from_usize(range.end))
+                    + offset,
                 unescaped_char,
             ))
         });
@@ -521,10 +519,8 @@ impl HasFormatSpecifier for RawString {
         let mut res = Vec::with_capacity(text.len());
         for (idx, c) in text.char_indices() {
             res.push((
-                TextRange::from_to(
-                    TextUnit::from_usize(idx),
-                    TextUnit::from_usize(idx + c.len_utf8()),
-                ) + offset,
+                TextRange::new(TextSize::from_usize(idx), TextSize::from_usize(idx + c.len_utf8()))
+                    + offset,
                 Ok(c),
             ));
         }
diff --git a/crates/ra_syntax/src/fuzz.rs b/crates/ra_syntax/src/fuzz.rs
index 7012df7f0..15aad2205 100644
--- a/crates/ra_syntax/src/fuzz.rs
+++ b/crates/ra_syntax/src/fuzz.rs
@@ -1,6 +1,6 @@
 //! FIXME: write short doc here
 
-use crate::{validation, AstNode, SourceFile, TextRange, TextUnit};
+use crate::{validation, AstNode, SourceFile, TextRange, TextSize};
 use ra_text_edit::AtomTextEdit;
 use std::str::{self, FromStr};
 
@@ -34,10 +34,8 @@ impl CheckReparse {
         let text = lines.collect::<Vec<_>>().join("\n");
         let text = format!("{}{}{}", PREFIX, text, SUFFIX);
         text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range
-        let delete = TextRange::offset_len(
-            TextUnit::from_usize(delete_start),
-            TextUnit::from_usize(delete_len),
-        );
+        let delete =
+            TextRange::at(TextSize::from_usize(delete_start), TextSize::from_usize(delete_len));
         let edited_text =
             format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]);
         let edit = AtomTextEdit { delete, insert };
diff --git a/crates/ra_syntax/src/lib.rs b/crates/ra_syntax/src/lib.rs
index a796e78b1..ceeb2bde9 100644
--- a/crates/ra_syntax/src/lib.rs
+++ b/crates/ra_syntax/src/lib.rs
@@ -55,7 +55,7 @@ pub use crate::{
     },
 };
 pub use ra_parser::{SyntaxKind, T};
-pub use rowan::{SmolStr, SyntaxText, TextRange, TextUnit, TokenAtOffset, WalkEvent};
+pub use rowan::{SmolStr, SyntaxText, TextRange, TextSize, TokenAtOffset, WalkEvent};
 
 /// `Parse` is the result of the parsing: a syntax tree and a collection of
 /// errors.
@@ -266,7 +266,7 @@ fn api_walkthrough() {
     assert_eq!(expr_syntax.kind(), SyntaxKind::BIN_EXPR);
 
     // And text range:
-    assert_eq!(expr_syntax.text_range(), TextRange::from_to(32.into(), 37.into()));
+    assert_eq!(expr_syntax.text_range(), TextRange::new(32.into(), 37.into()));
 
     // You can get node's text as a `SyntaxText` object, which will traverse the
     // tree collecting token's text:
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 67c1f1b48..1fdc76d98 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -4,7 +4,7 @@
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };
 
 /// A token of Rust source.
@@ -13,7 +13,7 @@ pub struct Token {
     /// The kind of token.
     pub kind: SyntaxKind,
     /// The length of the token.
-    pub len: TextUnit,
+    pub len: TextSize,
 }
 
 /// Break a string up into its component tokens.
@@ -30,7 +30,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 
     let mut offset: usize = rustc_lexer::strip_shebang(text)
         .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
             shebang_len
         })
         .unwrap_or(0);
@@ -38,8 +38,8 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let text_without_shebang = &text[offset..];
 
     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextUnit::from_usize(rustc_token.len);
-        let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
+        let token_len = TextSize::from_usize(rustc_token.len);
+        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);
 
         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len == TextUnit::of_str(text))
+        .filter(|(token, _)| token.len == TextSize::of(text))
         .map(|(token, error)| (token.kind, error))
 }
 
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
 /// returns `None` if any tokenization error occured.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
+        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
         .map(|(token, _error)| token.kind)
 }
 
@@ -96,9 +96,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
 
-    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
+    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
     let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text)))
+        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
     });
 
     Some((token, optional_error))
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs
index 2d65b91f1..ffff0a7b2 100644
--- a/crates/ra_syntax/src/parsing/reparsing.rs
+++ b/crates/ra_syntax/src/parsing/reparsing.rs
@@ -19,7 +19,7 @@ use crate::{
     syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode},
     SyntaxError,
     SyntaxKind::*,
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };
 
 pub(crate) fn incremental_reparse(
@@ -176,7 +176,7 @@ fn merge_errors(
         if old_err_range.end() <= range_before_reparse.start() {
             res.push(old_err);
         } else if old_err_range.start() >= range_before_reparse.end() {
-            let inserted_len = TextUnit::of_str(&edit.insert);
+            let inserted_len = TextSize::of(&edit.insert);
             res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len()));
             // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug)
         }
diff --git a/crates/ra_syntax/src/parsing/text_token_source.rs b/crates/ra_syntax/src/parsing/text_token_source.rs
index e2433913c..7ddc2c2c3 100644
--- a/crates/ra_syntax/src/parsing/text_token_source.rs
+++ b/crates/ra_syntax/src/parsing/text_token_source.rs
@@ -3,7 +3,7 @@
 use ra_parser::Token as PToken;
 use ra_parser::TokenSource;
 
-use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextUnit};
+use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize};
 
 pub(crate) struct TextTokenSource<'t> {
     text: &'t str,
@@ -15,7 +15,7 @@ pub(crate) struct TextTokenSource<'t> {
     /// 0  7 10
     /// ```
    /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
-    start_offsets: Vec<TextUnit>,
+    start_offsets: Vec<TextSize>,
     /// non-whitespace/comment tokens
     /// ```non-rust
     /// struct Foo {}
@@ -51,12 +51,12 @@ impl<'t> TokenSource for TextTokenSource<'t> {
         if pos >= self.tokens.len() {
             return false;
         }
-        let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
+        let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len);
         self.text[range] == *kw
     }
 }
 
-fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken {
+fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken {
     let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
     let is_jointed_to_next = if pos + 1 < start_offsets.len() {
         start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs
index 87bb21cd9..22aed1db1 100644
--- a/crates/ra_syntax/src/parsing/text_tree_sink.rs
+++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs
@@ -9,7 +9,7 @@ use crate::{
     syntax_node::GreenNode,
     SmolStr, SyntaxError,
     SyntaxKind::{self, *},
-    SyntaxTreeBuilder, TextRange, TextUnit,
+    SyntaxTreeBuilder, TextRange, TextSize,
 };
 
 /// Bridges the parser with our specific syntax tree representation.
@@ -18,7 +18,7 @@
 pub(crate) struct TextTreeSink<'a> {
     text: &'a str,
     tokens: &'a [Token],
-    text_pos: TextUnit,
+    text_pos: TextSize,
     token_pos: usize,
     state: State,
     inner: SyntaxTreeBuilder,
@@ -42,7 +42,7 @@ impl<'a> TreeSink for TextTreeSink<'a> {
         let len = self.tokens[self.token_pos..self.token_pos + n_tokens]
             .iter()
             .map(|it| it.len)
-            .sum::<TextUnit>();
+            .sum::<TextSize>();
         self.do_token(kind, len, n_tokens);
     }
 
@@ -62,12 +62,12 @@ impl<'a> TreeSink for TextTreeSink<'a> {
             self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count();
         let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias];
         let mut trivia_end =
-            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextUnit>();
+            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>();
 
         let n_attached_trivias = {
             let leading_trivias = leading_trivias.iter().rev().map(|it| {
                 let next_end = trivia_end - it.len;
-                let range = TextRange::from_to(next_end, trivia_end);
+                let range = TextRange::new(next_end, trivia_end);
                 trivia_end = next_end;
                 (it.kind, &self.text[range])
             });
@@ -132,8 +132,8 @@ impl<'a> TextTreeSink<'a> {
         }
     }
 
-    fn do_token(&mut self, kind: SyntaxKind, len: TextUnit, n_tokens: usize) {
-        let range = TextRange::offset_len(self.text_pos, len);
+    fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) {
+        let range = TextRange::at(self.text_pos, len);
         let text: SmolStr = self.text[range].into();
         self.text_pos += len;
         self.token_pos += n_tokens;
diff --git a/crates/ra_syntax/src/ptr.rs b/crates/ra_syntax/src/ptr.rs
index ecbfffcf4..62f03e93d 100644
--- a/crates/ra_syntax/src/ptr.rs
+++ b/crates/ra_syntax/src/ptr.rs
@@ -24,7 +24,7 @@ impl SyntaxNodePtr {
     pub fn to_node(&self, root: &SyntaxNode) -> SyntaxNode {
         assert!(root.parent().is_none());
         successors(Some(root.clone()), |node| {
-            node.children().find(|it| self.range.is_subrange(&it.text_range()))
+            node.children().find(|it| it.text_range().contains_range(self.range))
         })
         .find(|it| it.text_range() == self.range && it.kind() == self.kind)
         .unwrap_or_else(|| panic!("can't resolve local ptr to SyntaxNode: {:?}", self))
diff --git a/crates/ra_syntax/src/syntax_error.rs b/crates/ra_syntax/src/syntax_error.rs
index 54acf7847..7c4511fec 100644
--- a/crates/ra_syntax/src/syntax_error.rs
+++ b/crates/ra_syntax/src/syntax_error.rs
@@ -2,7 +2,7 @@
 
 use std::fmt;
 
-use crate::{TextRange, TextUnit};
+use crate::{TextRange, TextSize};
 
 /// Represents the result of unsuccessful tokenization, parsing
 /// or tree validation.
@@ -23,8 +23,8 @@ impl SyntaxError {
     pub fn new(message: impl Into<String>, range: TextRange) -> Self {
         Self(message.into(), range)
     }
-    pub fn new_at_offset(message: impl Into<String>, offset: TextUnit) -> Self {
-        Self(message.into(), TextRange::offset_len(offset, 0.into()))
+    pub fn new_at_offset(message: impl Into<String>, offset: TextSize) -> Self {
+        Self(message.into(), TextRange::empty(offset))
     }
 
     pub fn range(&self) -> TextRange {
diff --git a/crates/ra_syntax/src/syntax_node.rs b/crates/ra_syntax/src/syntax_node.rs
index 4e3a1460d..f9d379abf 100644
--- a/crates/ra_syntax/src/syntax_node.rs
+++ b/crates/ra_syntax/src/syntax_node.rs
@@ -8,7 +8,7 @@
 
 use rowan::{GreenNodeBuilder, Language};
 
-use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextUnit};
+use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextSize};
 
 pub(crate) use rowan::{GreenNode, GreenToken};
 
@@ -69,7 +69,7 @@ impl SyntaxTreeBuilder {
         self.inner.finish_node()
     }
 
-    pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextUnit) {
+    pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextSize) {
         self.errors.push(SyntaxError::new_at_offset(error.0, text_pos))
     }
 }
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 355843b94..4f2b67feb 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -5,7 +5,7 @@ use std::{
 
 use test_utils::{collect_rust_files, dir_tests, project_dir, read_text};
 
-use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextSize, Token};
 
 #[test]
 fn lexer_tests() {
@@ -121,12 +121,12 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = TextUnit::from_usize(0);
+    let mut offset = TextSize::from_usize(0);
     for token in tokens {
         let token_len = token.len;
-        let token_text = &text[TextRange::offset_len(offset, token.len)];
+        let token_text = &text[TextRange::at(offset, token.len)];
         offset += token.len;
-        writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
+        writeln!(acc, "{:?} {:?} {:?}", token.kind, token_len, token_text).unwrap();
     }
     for err in errors {
         writeln!(acc, "> error{:?} token({:?}) msg({})", err.range(), &text[err.range()], err)
diff --git a/crates/ra_syntax/src/validation.rs b/crates/ra_syntax/src/validation.rs
index f85b3e61b..77d7e132d 100644
--- a/crates/ra_syntax/src/validation.rs
+++ b/crates/ra_syntax/src/validation.rs
@@ -7,7 +7,7 @@ use rustc_lexer::unescape;
 use crate::{
     ast, match_ast, AstNode, SyntaxError,
     SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF},
-    SyntaxNode, SyntaxToken, TextUnit, T,
+    SyntaxNode, SyntaxToken, TextSize, T,
 };
 
 fn rustc_unescape_error_to_string(err: unescape::EscapeError) -> &'static str {
@@ -112,7 +112,7 @@ fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) {
 
     // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205)
     let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| {
-        let off = token.text_range().start() + TextUnit::from_usize(off + prefix_len);
+        let off = token.text_range().start() + TextSize::from_usize(off + prefix_len);
         acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
     };
-- 
cgit v1.2.3


From 63a462f37ca584e1a585a69e30823ce25d4d252f Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Sat, 25 Apr 2020 00:57:47 +0200
Subject: Switch to TryFrom

---
 crates/ra_syntax/src/ast/tokens.rs    | 14 ++++++--------
 crates/ra_syntax/src/fuzz.rs          | 11 ++++++++---
 crates/ra_syntax/src/parsing/lexer.rs | 24 +++++++++++++-----------
 crates/ra_syntax/src/tests.rs         |  2 +-
 crates/ra_syntax/src/validation.rs    |  4 +++-
 5 files changed, 31 insertions(+), 24 deletions(-)

(limited to 'crates/ra_syntax/src')

diff --git a/crates/ra_syntax/src/ast/tokens.rs b/crates/ra_syntax/src/ast/tokens.rs
index 26b8f9c36..8e04b0bbd 100644
--- a/crates/ra_syntax/src/ast/tokens.rs
+++ b/crates/ra_syntax/src/ast/tokens.rs
@@ -1,5 +1,7 @@
 //! There are many AstNodes, but only a few tokens, so we hand-write them here.
 
+use std::convert::{TryFrom, TryInto};
+
 use crate::{
     ast::{AstToken, Comment, RawString, String, Whitespace},
     TextRange, TextSize,
@@ -95,8 +97,8 @@ impl QuoteOffsets {
         }
 
         let start = TextSize::from(0);
-        let left_quote = TextSize::from_usize(left_quote) + TextSize::of('"');
-        let right_quote = TextSize::from_usize(right_quote);
+        let left_quote = TextSize::try_from(left_quote).unwrap() + TextSize::of('"');
+        let right_quote = TextSize::try_from(right_quote).unwrap();
         let end = TextSize::of(literal);
 
         let res = QuoteOffsets {
@@ -498,7 +500,7 @@ impl HasFormatSpecifier for String {
         let mut res = Vec::with_capacity(text.len());
         rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| {
             res.push((
-                TextRange::new(TextSize::from_usize(range.start), TextSize::from_usize(range.end))
+                TextRange::new(range.start.try_into().unwrap(), range.end.try_into().unwrap())
                     + offset,
                 unescaped_char,
             ))
         });
@@ -518,11 +520,7 @@ impl HasFormatSpecifier for RawString {
         let mut res = Vec::with_capacity(text.len());
         for (idx, c) in text.char_indices() {
-            res.push((
-                TextRange::new(TextSize::from_usize(idx), TextSize::from_usize(idx + c.len_utf8()))
-                    + offset,
-                Ok(c),
-            ));
+            res.push((TextRange::at(idx.try_into().unwrap(), TextSize::of(c)) + offset, Ok(c)));
         }
         Some(res)
     }
diff --git a/crates/ra_syntax/src/fuzz.rs b/crates/ra_syntax/src/fuzz.rs
index 15aad2205..10fbe3176 100644
--- a/crates/ra_syntax/src/fuzz.rs
+++ b/crates/ra_syntax/src/fuzz.rs
@@ -1,8 +1,13 @@
 //! FIXME: write short doc here
 
-use crate::{validation, AstNode, SourceFile, TextRange, TextSize};
+use std::{
+    convert::TryInto,
+    str::{self, FromStr},
+};
+
 use ra_text_edit::AtomTextEdit;
-use std::str::{self, FromStr};
+
+use crate::{validation, AstNode, SourceFile, TextRange};
 
 fn check_file_invariants(file: &SourceFile) {
     let root = file.syntax();
@@ -35,7 +40,7 @@ impl CheckReparse {
         let text = format!("{}{}{}", PREFIX, text, SUFFIX);
         text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range
         let delete =
-            TextRange::at(TextSize::from_usize(delete_start), TextSize::from_usize(delete_len));
+            TextRange::at(delete_start.try_into().unwrap(), delete_len.try_into().unwrap());
         let edited_text =
             format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]);
         let edit = AtomTextEdit { delete, insert };
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 1fdc76d98..f450ef4a2 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -1,6 +1,8 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
 //! It is just a bridge to `rustc_lexer`.
 
+use std::convert::TryInto;
+
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
@@ -28,18 +30,19 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let mut tokens = Vec::new();
     let mut errors = Vec::new();
 
-    let mut offset: usize = rustc_lexer::strip_shebang(text)
-        .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
+    let mut offset = match rustc_lexer::strip_shebang(text) {
+        Some(shebang_len) => {
+            tokens.push(Token { kind: SHEBANG, len: shebang_len.try_into().unwrap() });
             shebang_len
-        })
-        .unwrap_or(0);
+        }
+        None => 0,
+    };
 
     let text_without_shebang = &text[offset..];
 
     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextSize::from_usize(rustc_token.len);
-        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);
+        let token_len: TextSize = rustc_token.len.try_into().unwrap();
+        let token_range = TextRange::at(offset.try_into().unwrap(), token_len);
 
         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
@@ -96,10 +99,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
 
-    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
-    let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
-    });
+    let token = Token { kind: syntax_kind, len: rustc_token.len.try_into().unwrap() };
+    let optional_error = err_message
+        .map(|err_message| SyntaxError::new(err_message, TextRange::up_to(TextSize::of(text))));
 
     Some((token, optional_error))
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 4f2b67feb..aee57db62 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -121,7 +121,7 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = TextSize::from_usize(0);
+    let mut offset: TextSize = 0.into();
     for token in tokens {
         let token_len = token.len;
         let token_text = &text[TextRange::at(offset, token.len)];
diff --git a/crates/ra_syntax/src/validation.rs b/crates/ra_syntax/src/validation.rs
index 77d7e132d..5e93895ec 100644
--- a/crates/ra_syntax/src/validation.rs
+++ b/crates/ra_syntax/src/validation.rs
@@ -2,6 +2,8 @@
 
 mod block;
 
+use std::convert::TryFrom;
+
 use rustc_lexer::unescape;
 
 use crate::{
@@ -112,7 +114,7 @@ fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) {
 
     // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205)
     let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| {
-        let off = token.text_range().start() + TextSize::from_usize(off + prefix_len);
+        let off = token.text_range().start() + TextSize::try_from(off + prefix_len).unwrap();
         acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
     };
-- 
cgit v1.2.3
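
A note for readers of the series: taken together, the two patches are a mechanical
rename (rowan's `TextUnit` becomes `TextSize`, and the old `TextRange` constructors
get new names) plus a move to explicit, fallible `usize` conversions. The sketch
below is not part of the patches; it assumes the standalone `text_size` crate (the
same types are re-exported through rowan/`ra_syntax` in-tree) and a non-empty input
such as `demo("\"hello\"")`.

    // Old -> new correspondence exercised by the series above.
    use std::convert::TryFrom;

    use text_size::{TextRange, TextSize};

    fn demo(text: &str) {
        let end = TextSize::of(text);               // was: TextUnit::of_str(text)
        let quote = TextSize::of('"');              // was: TextUnit::of_char('"')
        let whole = TextRange::new(0.into(), end);  // was: TextRange::from_to(start, end)
        let first = TextRange::at(0.into(), quote); // was: TextRange::offset_len(offset, len)
        let cursor = TextRange::empty(end);         // was: TextRange::offset_len(offset, 0.into())
        let prefix = TextRange::up_to(quote);       // new shorthand for `0..len`

        assert!(whole.contains_range(first));       // was: first.is_subrange(&whole)
        assert_eq!(first.cover(cursor), whole);     // was: first.extend_to(&cursor)
        assert_eq!(prefix, first);

        // The second patch then swaps `from_usize` for `TryFrom`/`TryInto`
        // conversions that surface any usize -> u32 overflow as a visible unwrap.
        let len = TextSize::try_from(text.len()).unwrap();
        assert_eq!(len, end);
    }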
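
The "extra parens are intentional to prevent uint underflow, HWAB" comment in
`merge_errors` (first patch) is worth spelling out: `TextSize` wraps an unsigned
`u32`, so the order of the add and the subtract decides whether the intermediate
value underflows. A hedged sketch with a simplified, hypothetical helper (not
rust-analyzer's actual function):

    use text_size::{TextRange, TextSize};

    // Shift an error range that lies entirely after the edited region:
    // widen by the inserted text first, then narrow by the deleted text.
    fn shift_range(err: TextRange, inserted: TextSize, deleted: TextSize) -> TextRange {
        // `(err + inserted) - deleted` keeps the intermediate range valid;
        // subtracting first (`err - deleted + inserted`) would underflow
        // whenever the deleted span is longer than `err.start()` -- the bug
        // the "HWAB (here was a bug)" note commemorates.
        (err + inserted) - deleted
    }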