author     Aleksey Kladov <[email protected]>  2020-04-24 22:40:41 +0100
committer  Aleksey Kladov <[email protected]>  2020-04-25 10:59:18 +0100
commit     b1d5817dd18b7b5fc102a63b084b1ee7ff4f9996 (patch)
tree       e5d136c5ba4a6ba96aeeb423e6e3f64ca7cea3f9 /crates/ra_syntax/src/parsing
parent     27a7718880d93f55f905da606d108d3b3c682ab4 (diff)
Convert code to text-size
Diffstat (limited to 'crates/ra_syntax/src/parsing')
-rw-r--r--  crates/ra_syntax/src/parsing/lexer.rs              18
-rw-r--r--  crates/ra_syntax/src/parsing/reparsing.rs           4
-rw-r--r--  crates/ra_syntax/src/parsing/text_token_source.rs   8
-rw-r--r--  crates/ra_syntax/src/parsing/text_tree_sink.rs     14
4 files changed, 22 insertions, 22 deletions
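The patch is a mechanical migration from the old `TextUnit` API to the `text-size` crate's `TextSize`, together with the matching `TextRange` constructor renames. A minimal sketch of the mapping, assuming the `text_size` crate is in scope (the `demo` function is illustrative, not part of the patch):

```rust
use text_size::{TextRange, TextSize};

fn demo(text: &str) {
    // TextUnit::of_str(text)             ->  TextSize::of(text)
    let len: TextSize = TextSize::of(text);

    // TextRange::offset_len(offset, len) ->  TextRange::at(offset, len)
    let from_offset = TextRange::at(0.into(), len);

    // TextRange::from_to(start, end)     ->  TextRange::new(start, end)
    let from_endpoints = TextRange::new(0.into(), len);

    // Both constructors describe the same span over `text`.
    assert_eq!(from_offset, from_endpoints);
}
```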
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs
index 67c1f1b48..1fdc76d98 100644
--- a/crates/ra_syntax/src/parsing/lexer.rs
+++ b/crates/ra_syntax/src/parsing/lexer.rs
@@ -4,7 +4,7 @@
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };
 
 /// A token of Rust source.
@@ -13,7 +13,7 @@ pub struct Token {
     /// The kind of token.
     pub kind: SyntaxKind,
     /// The length of the token.
-    pub len: TextUnit,
+    pub len: TextSize,
 }
 
 /// Break a string up into its component tokens.
@@ -30,7 +30,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 
     let mut offset: usize = rustc_lexer::strip_shebang(text)
         .map(|shebang_len| {
-            tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) });
+            tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) });
             shebang_len
         })
         .unwrap_or(0);
@@ -38,8 +38,8 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     let text_without_shebang = &text[offset..];
 
     for rustc_token in rustc_lexer::tokenize(text_without_shebang) {
-        let token_len = TextUnit::from_usize(rustc_token.len);
-        let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len);
+        let token_len = TextSize::from_usize(rustc_token.len);
+        let token_range = TextRange::at(TextSize::from_usize(offset), token_len);
 
         let (syntax_kind, err_message) =
             rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]);
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
     lex_first_token(text)
-        .filter(|(token, _)| token.len == TextUnit::of_str(text))
+        .filter(|(token, _)| token.len == TextSize::of(text))
         .map(|(token, error)| (token.kind, error))
 }
 
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
     lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text))
+        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
         .map(|(token, _error)| token.kind)
 }
 
@@ -96,9 +96,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> {
     let rustc_token = rustc_lexer::first_token(text);
     let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text);
 
-    let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) };
+    let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) };
     let optional_error = err_message.map(|err_message| {
-        SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text)))
+        SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text)))
     });
 
     Some((token, optional_error))
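For context, a hedged sketch of how a caller might turn the lexer's length-only tokens into absolute ranges after this change, mirroring the offset arithmetic in `tokenize` above (the `Token` stand-in and `spans` helper are hypothetical, not from the patch):

```rust
use text_size::{TextRange, TextSize};

// Hypothetical stand-in for lexer::Token, which stores only a length.
struct Token {
    len: TextSize,
}

// Accumulate a running offset to recover each token's absolute range.
fn spans(tokens: &[Token]) -> Vec<TextRange> {
    let mut offset: TextSize = 0.into();
    tokens
        .iter()
        .map(|tok| {
            let range = TextRange::at(offset, tok.len); // was TextRange::offset_len
            offset += tok.len;
            range
        })
        .collect()
}
```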
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs
index 2d65b91f1..ffff0a7b2 100644
--- a/crates/ra_syntax/src/parsing/reparsing.rs
+++ b/crates/ra_syntax/src/parsing/reparsing.rs
@@ -19,7 +19,7 @@ use crate::{
     syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode},
     SyntaxError,
     SyntaxKind::*,
-    TextRange, TextUnit, T,
+    TextRange, TextSize, T,
 };
 
 pub(crate) fn incremental_reparse(
@@ -176,7 +176,7 @@ fn merge_errors(
         if old_err_range.end() <= range_before_reparse.start() {
             res.push(old_err);
         } else if old_err_range.start() >= range_before_reparse.end() {
-            let inserted_len = TextUnit::of_str(&edit.insert);
+            let inserted_len = TextSize::of(&edit.insert);
             res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len()));
             // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug)
         }
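The `merge_errors` hunk is the one place where the arithmetic is more than a rename: errors past the reparsed range shift right by the inserted length and left by the deleted length, and adding before subtracting keeps the intermediate value from underflowing (the "extra parens" note in the hunk above). A minimal sketch of just that step, assuming `text_size`'s `Add`/`Sub` impls for `TextRange` (the `shift` helper is hypothetical):

```rust
use text_size::{TextRange, TextSize};

// Shift an error range across an edit: add the inserted length first, then
// subtract the deleted length, so the unsigned intermediate never underflows.
fn shift(old_err_range: TextRange, inserted: &str, deleted: TextRange) -> TextRange {
    let inserted_len = TextSize::of(inserted); // was TextUnit::of_str
    (old_err_range + inserted_len) - deleted.len()
}
```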
diff --git a/crates/ra_syntax/src/parsing/text_token_source.rs b/crates/ra_syntax/src/parsing/text_token_source.rs
index e2433913c..7ddc2c2c3 100644
--- a/crates/ra_syntax/src/parsing/text_token_source.rs
+++ b/crates/ra_syntax/src/parsing/text_token_source.rs
@@ -3,7 +3,7 @@
 use ra_parser::Token as PToken;
 use ra_parser::TokenSource;
 
-use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextUnit};
+use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize};
 
 pub(crate) struct TextTokenSource<'t> {
     text: &'t str,
@@ -15,7 +15,7 @@ pub(crate) struct TextTokenSource<'t> {
     /// 0 7 10
     /// ```
    /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
-    start_offsets: Vec<TextUnit>,
+    start_offsets: Vec<TextSize>,
     /// non-whitespace/comment tokens
     /// ```non-rust
     /// struct Foo {}
@@ -51,12 +51,12 @@ impl<'t> TokenSource for TextTokenSource<'t> {
         if pos >= self.tokens.len() {
             return false;
         }
-        let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
+        let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len);
         self.text[range] == *kw
     }
 }
 
-fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken {
+fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken {
     let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF);
     let is_jointed_to_next = if pos + 1 < start_offsets.len() {
         start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1]
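A short sketch of what `mk_token`'s jointness check computes: two tokens are joint exactly when the first one's end offset equals the second one's start (the `is_joint` helper is hypothetical, not from the patch):

```rust
use text_size::TextSize;

// A token at start_offsets[pos] with length lens[pos] is joint to its
// successor when there is no gap between them.
fn is_joint(start_offsets: &[TextSize], lens: &[TextSize], pos: usize) -> bool {
    pos + 1 < start_offsets.len()
        && start_offsets[pos] + lens[pos] == start_offsets[pos + 1]
}
```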
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs
index 87bb21cd9..22aed1db1 100644
--- a/crates/ra_syntax/src/parsing/text_tree_sink.rs
+++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs
@@ -9,7 +9,7 @@ use crate::{
     syntax_node::GreenNode,
     SmolStr, SyntaxError,
     SyntaxKind::{self, *},
-    SyntaxTreeBuilder, TextRange, TextUnit,
+    SyntaxTreeBuilder, TextRange, TextSize,
 };
 
 /// Bridges the parser with our specific syntax tree representation.
@@ -18,7 +18,7 @@ use crate::{
 pub(crate) struct TextTreeSink<'a> {
     text: &'a str,
     tokens: &'a [Token],
-    text_pos: TextUnit,
+    text_pos: TextSize,
     token_pos: usize,
     state: State,
     inner: SyntaxTreeBuilder,
@@ -42,7 +42,7 @@ impl<'a> TreeSink for TextTreeSink<'a> {
         let len = self.tokens[self.token_pos..self.token_pos + n_tokens]
             .iter()
             .map(|it| it.len)
-            .sum::<TextUnit>();
+            .sum::<TextSize>();
         self.do_token(kind, len, n_tokens);
     }
 
@@ -62,12 +62,12 @@ impl<'a> TreeSink for TextTreeSink<'a> {
             self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count();
         let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias];
         let mut trivia_end =
-            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextUnit>();
+            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>();
 
         let n_attached_trivias = {
             let leading_trivias = leading_trivias.iter().rev().map(|it| {
                 let next_end = trivia_end - it.len;
-                let range = TextRange::from_to(next_end, trivia_end);
+                let range = TextRange::new(next_end, trivia_end);
                 trivia_end = next_end;
                 (it.kind, &self.text[range])
             });
@@ -132,8 +132,8 @@ impl<'a> TextTreeSink<'a> {
         }
     }
 
-    fn do_token(&mut self, kind: SyntaxKind, len: TextUnit, n_tokens: usize) {
-        let range = TextRange::offset_len(self.text_pos, len);
+    fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) {
+        let range = TextRange::at(self.text_pos, len);
         let text: SmolStr = self.text[range].into();
         self.text_pos += len;
         self.token_pos += n_tokens;
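Finally, a hedged sketch of the reverse walk in the trivia hunk above: starting from the end offset of the trivia run, each step peels one token's length off the back and names its range with the new `TextRange::new` (the `rev_ranges` helper is hypothetical, not from the patch):

```rust
use text_size::{TextRange, TextSize};

// Walk token lengths back-to-front from `trivia_end`, yielding each token's
// absolute range, like the rev() iterator in the hunk above.
fn rev_ranges(lens: &[TextSize], mut trivia_end: TextSize) -> Vec<TextRange> {
    lens.iter()
        .rev()
        .map(|&len| {
            let next_end = trivia_end - len;
            let range = TextRange::new(next_end, trivia_end); // was TextRange::from_to
            trivia_end = next_end;
            range
        })
        .collect()
}
```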