From 88c944f96b426955933b77ca68c92990734769be Mon Sep 17 00:00:00 2001
From: CAD97
Date: Thu, 12 Mar 2020 22:29:44 -0400
Subject: Remove some TextUnit->usize escapees

---
 crates/ra_syntax/src/parsing/lexer.rs |  4 ++--
 crates/ra_syntax/src/tests.rs         | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)

(limited to 'crates/ra_syntax')

diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index f2684c852..d1baaa607 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len.to_usize() == text.len())
+        .filter(|(token, _)| token.len == TextUnit::of_str(text))
         .map(|(token, error)| (token.kind, error))
 }
 
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
         .map(|(token, _error)| token.kind)
 }
 
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 912e6aec0..d331d541e 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -5,7 +5,7 @@ use std::{
 
 use test_utils::{collect_tests, dir_tests, project_dir, read_text};
 
-use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};
 
 #[test]
 fn lexer_tests() {
@@ -120,11 +120,11 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = 0;
+    let mut offset = TextUnit::from_usize(0);
     for token in tokens {
-        let token_len = token.len.to_usize();
-        let token_text = &text[offset..offset + token_len];
-        offset += token_len;
+        let token_len = token.len;
+        let token_text = &text[TextRange::offset_len(offset, token.len)];
+        offset += token.len;
         writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
     }
     for err in errors {
-- 
cgit v1.2.3