author     CAD97 <[email protected]>  2020-03-13 02:29:44 +0000
committer  CAD97 <[email protected]>  2020-03-13 02:33:27 +0000
commit     88c944f96b426955933b77ca68c92990734769be
tree       23e3903d5d9c7d72c32eb73649dd36b24eba6d38 /crates
parent     2f9f409538553fc709bbcad1a5c76968f36e5968
Remove some TextUnit->usize escapees
Diffstat (limited to 'crates')
-rw-r--r--  crates/ra_assists/src/lib.rs              |  2
-rw-r--r--  crates/ra_ide/src/syntax_tree.rs          | 16
-rw-r--r--  crates/ra_ide_db/src/line_index.rs        | 14
-rw-r--r--  crates/ra_ide_db/src/line_index_utils.rs  |  2
-rw-r--r--  crates/ra_syntax/src/parsing/lexer.rs     |  4
-rw-r--r--  crates/ra_syntax/src/tests.rs             | 10
-rw-r--r--  crates/ra_text_edit/src/text_edit.rs      | 10
7 files changed, 31 insertions, 27 deletions
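
The common pattern across these files: instead of escaping into usize with .to_usize() and doing arithmetic or slicing there, the code stays in TextUnit/TextRange terms and converts only where a usize is genuinely required. Below is a minimal sketch of the before/after shape, assuming the text_unit API used by these crates (TextUnit::of_str and indexing str by TextRange are taken from the diff itself); the function names are illustrative, not rust-analyzer's.

    use text_unit::{TextRange, TextUnit};

    // Before: escape into usize and slice by hand.
    fn target_text_old(text: &str, range: TextRange) -> &str {
        &text[range.start().to_usize()..range.end().to_usize()]
    }

    // After: stay in TextUnit space; the range indexes the string directly.
    fn target_text_new(text: &str, range: TextRange) -> &str {
        &text[range]
    }

    // Length comparisons likewise stay in TextUnit instead of usize.
    fn covers_whole_input(token_len: TextUnit, text: &str) -> bool {
        token_len == TextUnit::of_str(text)
    }
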
diff --git a/crates/ra_assists/src/lib.rs b/crates/ra_assists/src/lib.rs
index 50a15f978..62fadcddd 100644
--- a/crates/ra_assists/src/lib.rs
+++ b/crates/ra_assists/src/lib.rs
@@ -235,7 +235,7 @@ mod helpers {
             (Some(assist), ExpectedResult::Target(target)) => {
                 let action = assist.0[0].action.clone().unwrap();
                 let range = action.target.expect("expected target on action");
-                assert_eq_text!(&before[range.start().to_usize()..range.end().to_usize()], target);
+                assert_eq_text!(&before[range], target);
             }
             (Some(_), ExpectedResult::NotApplicable) => panic!("assist should not be applicable!"),
             (None, ExpectedResult::After(_)) | (None, ExpectedResult::Target(_)) => {
diff --git a/crates/ra_ide/src/syntax_tree.rs b/crates/ra_ide/src/syntax_tree.rs
index 55966daf3..f58e436d1 100644
--- a/crates/ra_ide/src/syntax_tree.rs
+++ b/crates/ra_ide/src/syntax_tree.rs
@@ -5,7 +5,7 @@ use ra_ide_db::RootDatabase;
 use ra_syntax::{
     algo, AstNode, NodeOrToken, SourceFile,
     SyntaxKind::{RAW_STRING, STRING},
-    SyntaxToken, TextRange,
+    SyntaxToken, TextRange, TextUnit,
 };
 
 pub use ra_db::FileId;
@@ -56,19 +56,23 @@ fn syntax_tree_for_token(node: &SyntaxToken, text_range: TextRange) -> Option<St
     let start = text_range.start() - node_range.start();
 
     // how many characters we have selected
-    let len = text_range.len().to_usize();
+    let len = text_range.len();
 
-    let node_len = node_range.len().to_usize();
+    let node_len = node_range.len();
 
-    let start = start.to_usize();
+    let start = start;
 
     // We want to cap our length
     let len = len.min(node_len);
 
     // Ensure our slice is inside the actual string
-    let end = if start + len < text.len() { start + len } else { text.len() - start };
+    let end = if start + len < TextUnit::of_str(&text) {
+        start + len
+    } else {
+        TextUnit::of_str(&text) - start
+    };
 
-    let text = &text[start..end];
+    let text = &text[TextRange::from_to(start, end)];
 
     // Remove possible extra string quotes from the start
     // and the end of the string
diff --git a/crates/ra_ide_db/src/line_index.rs b/crates/ra_ide_db/src/line_index.rs
index b9db5c276..8ae745ff2 100644
--- a/crates/ra_ide_db/src/line_index.rs
+++ b/crates/ra_ide_db/src/line_index.rs
@@ -59,7 +59,7 @@ impl LineIndex {
             }
 
             let char_len = TextUnit::of_char(c);
-            if char_len.to_usize() > 1 {
+            if char_len > TextUnit::from_usize(1) {
                 utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len });
             }
 
@@ -101,12 +101,12 @@ impl LineIndex {
             .filter(|it| !it.is_empty())
     }
 
-    fn utf8_to_utf16_col(&self, line: u32, mut col: TextUnit) -> usize {
+    fn utf8_to_utf16_col(&self, line: u32, col: TextUnit) -> usize {
         if let Some(utf16_chars) = self.utf16_lines.get(&line) {
-            let mut correction = TextUnit::from_usize(0);
+            let mut correction = 0;
             for c in utf16_chars {
                 if col >= c.end {
-                    correction += c.len() - TextUnit::from_usize(1);
+                    correction += c.len().to_usize() - 1;
                 } else {
                     // From here on, all utf16 characters come *after* the character we are mapping,
                     // so we don't need to take them into account
@@ -114,10 +114,10 @@ impl LineIndex {
                 }
             }
 
-            col -= correction;
+            col.to_usize() - correction
+        } else {
+            col.to_usize()
         }
-
-        col.to_usize()
     }
 
     fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextUnit {
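
For reference, a hedged worked example of the utf8_to_utf16_col arithmetic after this change (standalone code, not the LineIndex implementation): each multi-byte character before the column contributes its UTF-8 length minus one to the correction, assuming, as the patched loop does, that such a character is a single UTF-16 unit.

    // Standalone demo of the correction step; wide_chars holds the UTF-8
    // start/end offsets of multi-byte characters on the line, mirroring
    // the Utf16Char records in the index.
    fn utf8_to_utf16_col_demo(wide_chars: &[(usize, usize)], col: usize) -> usize {
        let mut correction = 0;
        for &(start, end) in wide_chars {
            if col >= end {
                correction += (end - start) - 1;
            } else {
                // All remaining wide characters come after the column being mapped.
                break;
            }
        }
        col - correction
    }

    // utf8_to_utf16_col_demo(&[(0, 3)], 3) == 1   // just past a 3-byte char such as 'メ'
    // utf8_to_utf16_col_demo(&[(0, 3)], 4) == 2   // an ASCII char following it
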
diff --git a/crates/ra_ide_db/src/line_index_utils.rs b/crates/ra_ide_db/src/line_index_utils.rs
index 75a498151..2ebbabdc6 100644
--- a/crates/ra_ide_db/src/line_index_utils.rs
+++ b/crates/ra_ide_db/src/line_index_utils.rs
@@ -145,7 +145,7 @@ impl Iterator for OffsetStepIter<'_> {
                     Some((next, next_offset))
                 } else {
                     let char_len = TextUnit::of_char(c);
-                    if char_len.to_usize() > 1 {
+                    if char_len > TextUnit::from_usize(1) {
                         let start = self.offset + TextUnit::from_usize(i);
                         let end = start + char_len;
                         let next = Step::Utf16Char(TextRange::from_to(start, end));
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index f2684c852..d1baaa607 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len.to_usize() == text.len())
+        .filter(|(token, _)| token.len == TextUnit::of_str(text))
         .map(|(token, error)| (token.kind, error))
 }
 
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len.to_usize() == text.len())
+        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
         .map(|(token, _error)| token.kind)
 }
 
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs
index 912e6aec0..d331d541e 100644
--- a/crates/ra_syntax/src/tests.rs
+++ b/crates/ra_syntax/src/tests.rs
@@ -5,7 +5,7 @@ use std::{
 
 use test_utils::{collect_tests, dir_tests, project_dir, read_text};
 
-use crate::{fuzz, tokenize, SourceFile, SyntaxError, Token};
+use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token};
 
 #[test]
 fn lexer_tests() {
@@ -120,11 +120,11 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) {
 
 fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String {
     let mut acc = String::new();
-    let mut offset = 0;
+    let mut offset = TextUnit::from_usize(0);
     for token in tokens {
-        let token_len = token.len.to_usize();
-        let token_text = &text[offset..offset + token_len];
-        offset += token_len;
+        let token_len = token.len;
+        let token_text = &text[TextRange::offset_len(offset, token.len)];
+        offset += token.len;
         writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap();
     }
     for err in errors {
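
The dump loop above now advances a TextUnit offset and slices with TextRange::offset_len rather than doing usize arithmetic. A self-contained sketch of that walk, assuming the text_unit API and a stand-in Token type with only a len field:

    use text_unit::{TextRange, TextUnit};

    struct Token {
        len: TextUnit,
    }

    // Collect each token's text by advancing a TextUnit offset, never a usize.
    fn token_texts<'a>(tokens: &[Token], text: &'a str) -> Vec<&'a str> {
        let mut offset = TextUnit::from_usize(0);
        let mut out = Vec::with_capacity(tokens.len());
        for token in tokens {
            out.push(&text[TextRange::offset_len(offset, token.len)]);
            offset += token.len;
        }
        out
    }
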
diff --git a/crates/ra_text_edit/src/text_edit.rs b/crates/ra_text_edit/src/text_edit.rs
index 3291ada42..5c37a08a8 100644
--- a/crates/ra_text_edit/src/text_edit.rs
+++ b/crates/ra_text_edit/src/text_edit.rs
@@ -63,12 +63,12 @@ impl TextEdit {
     }
 
     pub fn apply(&self, text: &str) -> String {
-        let mut total_len = text.len();
+        let mut total_len = TextUnit::of_str(text);
         for atom in self.atoms.iter() {
-            total_len += atom.insert.len();
-            total_len -= (atom.delete.end() - atom.delete.start()).to_usize();
+            total_len += TextUnit::of_str(&atom.insert);
+            total_len -= atom.delete.end() - atom.delete.start();
         }
-        let mut buf = String::with_capacity(total_len);
+        let mut buf = String::with_capacity(total_len.to_usize());
         let mut prev = 0;
         for atom in self.atoms.iter() {
             let start = atom.delete.start().to_usize();
@@ -80,7 +80,7 @@ impl TextEdit {
             prev = end;
         }
         buf.push_str(&text[prev..text.len()]);
-        assert_eq!(TextUnit::of_str(&buf), total_len);
+        assert_eq!(TextUnit::of_str(&buf), total_len);
         buf
     }
 
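
After this change, the one usize left in apply is at the allocation site, since String::with_capacity takes a usize. A hedged sketch of the capacity bookkeeping in TextUnit terms, with a (delete-range, insert-text) pair standing in for the edit's atoms:

    use text_unit::{TextRange, TextUnit};

    // Predict the length of the edited text entirely in TextUnit arithmetic.
    fn edited_len(text: &str, atoms: &[(TextRange, String)]) -> TextUnit {
        let mut total_len = TextUnit::of_str(text);
        for (delete, insert) in atoms {
            total_len += TextUnit::of_str(insert);
            total_len -= delete.end() - delete.start();
        }
        total_len
    }

    // Only the allocation converts back:
    // String::with_capacity(edited_len(text, atoms).to_usize())
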