Diffstat (limited to 'crates'): 73 files changed, 378 insertions, 392 deletions
diff --git a/crates/ra_assists/src/assist_ctx.rs b/crates/ra_assists/src/assist_ctx.rs
index 279163257..2fe7c3de3 100644
--- a/crates/ra_assists/src/assist_ctx.rs
+++ b/crates/ra_assists/src/assist_ctx.rs
@@ -5,7 +5,7 @@ use ra_fmt::{leading_indent, reindent}; | |||
5 | use ra_ide_db::RootDatabase; | 5 | use ra_ide_db::RootDatabase; |
6 | use ra_syntax::{ | 6 | use ra_syntax::{ |
7 | algo::{self, find_covering_element, find_node_at_offset}, | 7 | algo::{self, find_covering_element, find_node_at_offset}, |
8 | AstNode, SourceFile, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxToken, TextRange, TextUnit, | 8 | AstNode, SourceFile, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxToken, TextRange, TextSize, |
9 | TokenAtOffset, | 9 | TokenAtOffset, |
10 | }; | 10 | }; |
11 | use ra_text_edit::TextEditBuilder; | 11 | use ra_text_edit::TextEditBuilder; |
@@ -178,7 +178,7 @@ impl<'a> AssistGroup<'a> { | |||
178 | #[derive(Default)] | 178 | #[derive(Default)] |
179 | pub(crate) struct ActionBuilder { | 179 | pub(crate) struct ActionBuilder { |
180 | edit: TextEditBuilder, | 180 | edit: TextEditBuilder, |
181 | cursor_position: Option<TextUnit>, | 181 | cursor_position: Option<TextSize>, |
182 | target: Option<TextRange>, | 182 | target: Option<TextRange>, |
183 | file: AssistFile, | 183 | file: AssistFile, |
184 | } | 184 | } |
@@ -211,12 +211,12 @@ impl ActionBuilder { | |||
211 | } | 211 | } |
212 | 212 | ||
213 | /// Append specified `text` at the given `offset` | 213 | /// Append specified `text` at the given `offset` |
214 | pub(crate) fn insert(&mut self, offset: TextUnit, text: impl Into<String>) { | 214 | pub(crate) fn insert(&mut self, offset: TextSize, text: impl Into<String>) { |
215 | self.edit.insert(offset, text.into()) | 215 | self.edit.insert(offset, text.into()) |
216 | } | 216 | } |
217 | 217 | ||
218 | /// Specify desired position of the cursor after the assist is applied. | 218 | /// Specify desired position of the cursor after the assist is applied. |
219 | pub(crate) fn set_cursor(&mut self, offset: TextUnit) { | 219 | pub(crate) fn set_cursor(&mut self, offset: TextSize) { |
220 | self.cursor_position = Some(offset) | 220 | self.cursor_position = Some(offset) |
221 | } | 221 | } |
222 | 222 | ||
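Not part of the commit, but for orientation: the hunks above only swap the offset type name. A minimal standalone sketch of the new API, assuming the text-size crate (re-exported by ra_syntax as TextSize/TextRange in this change) is available as a dependency; `CursorEdit` is a hypothetical stand-in, not a real type:

    use text_size::{TextRange, TextSize};

    // Hypothetical stand-in for the ActionBuilder bookkeeping above:
    // cursor offsets are now `TextSize` instead of the old `TextUnit`.
    struct CursorEdit {
        cursor_position: Option<TextSize>,
        target: Option<TextRange>,
    }

    impl CursorEdit {
        fn set_cursor(&mut self, offset: TextSize) {
            self.cursor_position = Some(offset);
        }
    }

    fn main() {
        let mut edit = CursorEdit { cursor_position: None, target: None };
        // `TextSize::of` measures UTF-8 length, replacing `TextUnit::of_str`.
        edit.set_cursor(TextSize::of("fn main() {"));
        edit.target = Some(TextRange::new(TextSize::from(0), TextSize::of("fn main")));
        assert_eq!(edit.cursor_position, Some(TextSize::from(11)));
    }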
diff --git a/crates/ra_assists/src/handlers/add_custom_impl.rs b/crates/ra_assists/src/handlers/add_custom_impl.rs
index 15f9b216b..7bb90dba3 100644
--- a/crates/ra_assists/src/handlers/add_custom_impl.rs
+++ b/crates/ra_assists/src/handlers/add_custom_impl.rs
@@ -2,7 +2,7 @@ use ra_syntax::{ | |||
2 | ast::{self, AstNode}, | 2 | ast::{self, AstNode}, |
3 | Direction, SmolStr, | 3 | Direction, SmolStr, |
4 | SyntaxKind::{IDENT, WHITESPACE}, | 4 | SyntaxKind::{IDENT, WHITESPACE}, |
5 | TextRange, TextUnit, | 5 | TextRange, TextSize, |
6 | }; | 6 | }; |
7 | use stdx::SepBy; | 7 | use stdx::SepBy; |
8 | 8 | ||
@@ -71,7 +71,7 @@ pub(crate) fn add_custom_impl(ctx: AssistCtx) -> Option<Assist> { | |||
71 | 71 | ||
72 | let cursor_delta = if has_more_derives { | 72 | let cursor_delta = if has_more_derives { |
73 | edit.replace(input.syntax().text_range(), new_attr_input); | 73 | edit.replace(input.syntax().text_range(), new_attr_input); |
74 | input.syntax().text_range().len() - TextUnit::from_usize(new_attr_input_len) | 74 | input.syntax().text_range().len() - TextSize::from_usize(new_attr_input_len) |
75 | } else { | 75 | } else { |
76 | let attr_range = attr.syntax().text_range(); | 76 | let attr_range = attr.syntax().text_range(); |
77 | edit.delete(attr_range); | 77 | edit.delete(attr_range); |
@@ -81,13 +81,13 @@ pub(crate) fn add_custom_impl(ctx: AssistCtx) -> Option<Assist> { | |||
81 | .next_sibling_or_token() | 81 | .next_sibling_or_token() |
82 | .filter(|t| t.kind() == WHITESPACE) | 82 | .filter(|t| t.kind() == WHITESPACE) |
83 | .map(|t| t.text_range()) | 83 | .map(|t| t.text_range()) |
84 | .unwrap_or_else(|| TextRange::from_to(TextUnit::from(0), TextUnit::from(0))); | 84 | .unwrap_or_else(|| TextRange::new(TextSize::from(0), TextSize::from(0))); |
85 | edit.delete(line_break_range); | 85 | edit.delete(line_break_range); |
86 | 86 | ||
87 | attr_range.len() + line_break_range.len() | 87 | attr_range.len() + line_break_range.len() |
88 | }; | 88 | }; |
89 | 89 | ||
90 | edit.set_cursor(start_offset + TextUnit::of_str(&buf) - cursor_delta); | 90 | edit.set_cursor(start_offset + TextSize::of(&buf) - cursor_delta); |
91 | buf.push_str("\n}"); | 91 | buf.push_str("\n}"); |
92 | edit.insert(start_offset, buf); | 92 | edit.insert(start_offset, buf); |
93 | }) | 93 | }) |
diff --git a/crates/ra_assists/src/handlers/add_derive.rs b/crates/ra_assists/src/handlers/add_derive.rs
index b0d1a0a80..6254eb7c4 100644
--- a/crates/ra_assists/src/handlers/add_derive.rs
+++ b/crates/ra_assists/src/handlers/add_derive.rs
@@ -1,7 +1,7 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast::{self, AstNode, AttrsOwner}, | 2 | ast::{self, AstNode, AttrsOwner}, |
3 | SyntaxKind::{COMMENT, WHITESPACE}, | 3 | SyntaxKind::{COMMENT, WHITESPACE}, |
4 | TextUnit, | 4 | TextSize, |
5 | }; | 5 | }; |
6 | 6 | ||
7 | use crate::{Assist, AssistCtx, AssistId}; | 7 | use crate::{Assist, AssistCtx, AssistId}; |
@@ -37,9 +37,9 @@ pub(crate) fn add_derive(ctx: AssistCtx) -> Option<Assist> { | |||
37 | let offset = match derive_attr { | 37 | let offset = match derive_attr { |
38 | None => { | 38 | None => { |
39 | edit.insert(node_start, "#[derive()]\n"); | 39 | edit.insert(node_start, "#[derive()]\n"); |
40 | node_start + TextUnit::of_str("#[derive(") | 40 | node_start + TextSize::of("#[derive(") |
41 | } | 41 | } |
42 | Some(tt) => tt.syntax().text_range().end() - TextUnit::of_char(')'), | 42 | Some(tt) => tt.syntax().text_range().end() - TextSize::of(')'), |
43 | }; | 43 | }; |
44 | edit.target(nominal.syntax().text_range()); | 44 | edit.target(nominal.syntax().text_range()); |
45 | edit.set_cursor(offset) | 45 | edit.set_cursor(offset) |
@@ -47,7 +47,7 @@ pub(crate) fn add_derive(ctx: AssistCtx) -> Option<Assist> { | |||
47 | } | 47 | } |
48 | 48 | ||
49 | // Insert `derive` after doc comments. | 49 | // Insert `derive` after doc comments. |
50 | fn derive_insertion_offset(nominal: &ast::NominalDef) -> Option<TextUnit> { | 50 | fn derive_insertion_offset(nominal: &ast::NominalDef) -> Option<TextSize> { |
51 | let non_ws_child = nominal | 51 | let non_ws_child = nominal |
52 | .syntax() | 52 | .syntax() |
53 | .children_with_tokens() | 53 | .children_with_tokens() |
diff --git a/crates/ra_assists/src/handlers/add_explicit_type.rs b/crates/ra_assists/src/handlers/add_explicit_type.rs
index 6c56d93d8..bc313782b 100644
--- a/crates/ra_assists/src/handlers/add_explicit_type.rs
+++ b/crates/ra_assists/src/handlers/add_explicit_type.rs
@@ -37,8 +37,8 @@ pub(crate) fn add_explicit_type(ctx: AssistCtx) -> Option<Assist> { | |||
37 | let stmt_range = stmt.syntax().text_range(); | 37 | let stmt_range = stmt.syntax().text_range(); |
38 | let eq_range = stmt.eq_token()?.text_range(); | 38 | let eq_range = stmt.eq_token()?.text_range(); |
39 | // Assist should only be applicable if cursor is between 'let' and '=' | 39 | // Assist should only be applicable if cursor is between 'let' and '=' |
40 | let let_range = TextRange::from_to(stmt_range.start(), eq_range.start()); | 40 | let let_range = TextRange::new(stmt_range.start(), eq_range.start()); |
41 | let cursor_in_range = ctx.frange.range.is_subrange(&let_range); | 41 | let cursor_in_range = let_range.contains_range(ctx.frange.range); |
42 | if !cursor_in_range { | 42 | if !cursor_in_range { |
43 | return None; | 43 | return None; |
44 | } | 44 | } |
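A side note on the two renames in this hunk (a sketch, not from the commit, assuming the text-size crate): `TextRange::from_to` becomes `TextRange::new`, and `is_subrange` becomes `contains_range` with the receiver flipped to the containing range.

    use text_size::{TextRange, TextSize};

    fn main() {
        // Old: `TextRange::from_to(start, end)`; new: `TextRange::new(start, end)`.
        let let_range = TextRange::new(TextSize::from(0), TextSize::from(10));
        let cursor_range = TextRange::empty(TextSize::from(4));

        // Old: `cursor_range.is_subrange(&let_range)`.
        // New: the outer range is the receiver, so the check reads the other way around.
        assert!(let_range.contains_range(cursor_range));
    }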
diff --git a/crates/ra_assists/src/handlers/add_from_impl_for_enum.rs b/crates/ra_assists/src/handlers/add_from_impl_for_enum.rs
index 0621487e8..03806724a 100644
--- a/crates/ra_assists/src/handlers/add_from_impl_for_enum.rs
+++ b/crates/ra_assists/src/handlers/add_from_impl_for_enum.rs
@@ -1,6 +1,6 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast::{self, AstNode, NameOwner}, | 2 | ast::{self, AstNode, NameOwner}, |
3 | TextUnit, | 3 | TextSize, |
4 | }; | 4 | }; |
5 | use stdx::format_to; | 5 | use stdx::format_to; |
6 | 6 | ||
@@ -65,7 +65,7 @@ impl From<{0}> for {1} {{ | |||
65 | variant_name | 65 | variant_name |
66 | ); | 66 | ); |
67 | edit.insert(start_offset, buf); | 67 | edit.insert(start_offset, buf); |
68 | edit.set_cursor(start_offset + TextUnit::of_str("\n\n")); | 68 | edit.set_cursor(start_offset + TextSize::of("\n\n")); |
69 | }, | 69 | }, |
70 | ) | 70 | ) |
71 | } | 71 | } |
diff --git a/crates/ra_assists/src/handlers/add_function.rs b/crates/ra_assists/src/handlers/add_function.rs
index f185cffdb..7a8f5705f 100644
--- a/crates/ra_assists/src/handlers/add_function.rs
+++ b/crates/ra_assists/src/handlers/add_function.rs
@@ -1,6 +1,6 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast::{self, AstNode}, | 2 | ast::{self, AstNode}, |
3 | SyntaxKind, SyntaxNode, TextUnit, | 3 | SyntaxKind, SyntaxNode, TextSize, |
4 | }; | 4 | }; |
5 | 5 | ||
6 | use crate::{Assist, AssistCtx, AssistFile, AssistId}; | 6 | use crate::{Assist, AssistCtx, AssistFile, AssistId}; |
@@ -69,8 +69,8 @@ pub(crate) fn add_function(ctx: AssistCtx) -> Option<Assist> { | |||
69 | } | 69 | } |
70 | 70 | ||
71 | struct FunctionTemplate { | 71 | struct FunctionTemplate { |
72 | insert_offset: TextUnit, | 72 | insert_offset: TextSize, |
73 | cursor_offset: TextUnit, | 73 | cursor_offset: TextSize, |
74 | fn_def: ast::SourceFile, | 74 | fn_def: ast::SourceFile, |
75 | file: AssistFile, | 75 | file: AssistFile, |
76 | } | 76 | } |
@@ -129,7 +129,7 @@ impl FunctionBuilder { | |||
129 | let fn_def = indent_once.increase_indent(fn_def); | 129 | let fn_def = indent_once.increase_indent(fn_def); |
130 | let fn_def = ast::make::add_trailing_newlines(1, fn_def); | 130 | let fn_def = ast::make::add_trailing_newlines(1, fn_def); |
131 | let fn_def = indent.increase_indent(fn_def); | 131 | let fn_def = indent.increase_indent(fn_def); |
132 | (fn_def, it.syntax().text_range().start() + TextUnit::from_usize(1)) | 132 | (fn_def, it.syntax().text_range().start() + TextSize::from_usize(1)) |
133 | } | 133 | } |
134 | }; | 134 | }; |
135 | 135 | ||
diff --git a/crates/ra_assists/src/handlers/add_impl.rs b/crates/ra_assists/src/handlers/add_impl.rs
index 6622eadb2..d26f8b93d 100644
--- a/crates/ra_assists/src/handlers/add_impl.rs
+++ b/crates/ra_assists/src/handlers/add_impl.rs
@@ -1,6 +1,6 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast::{self, AstNode, NameOwner, TypeParamsOwner}, | 2 | ast::{self, AstNode, NameOwner, TypeParamsOwner}, |
3 | TextUnit, | 3 | TextSize, |
4 | }; | 4 | }; |
5 | use stdx::{format_to, SepBy}; | 5 | use stdx::{format_to, SepBy}; |
6 | 6 | ||
@@ -51,7 +51,7 @@ pub(crate) fn add_impl(ctx: AssistCtx) -> Option<Assist> { | |||
51 | format_to!(buf, "<{}>", generic_params) | 51 | format_to!(buf, "<{}>", generic_params) |
52 | } | 52 | } |
53 | buf.push_str(" {\n"); | 53 | buf.push_str(" {\n"); |
54 | edit.set_cursor(start_offset + TextUnit::of_str(&buf)); | 54 | edit.set_cursor(start_offset + TextSize::of(&buf)); |
55 | buf.push_str("\n}"); | 55 | buf.push_str("\n}"); |
56 | edit.insert(start_offset, buf); | 56 | edit.insert(start_offset, buf); |
57 | }) | 57 | }) |
diff --git a/crates/ra_assists/src/handlers/add_new.rs b/crates/ra_assists/src/handlers/add_new.rs
index 240b19fa3..0698cce88 100644
--- a/crates/ra_assists/src/handlers/add_new.rs
+++ b/crates/ra_assists/src/handlers/add_new.rs
@@ -3,7 +3,7 @@ use ra_syntax::{ | |||
3 | ast::{ | 3 | ast::{ |
4 | self, AstNode, NameOwner, StructKind, TypeAscriptionOwner, TypeParamsOwner, VisibilityOwner, | 4 | self, AstNode, NameOwner, StructKind, TypeAscriptionOwner, TypeParamsOwner, VisibilityOwner, |
5 | }, | 5 | }, |
6 | TextUnit, T, | 6 | TextSize, T, |
7 | }; | 7 | }; |
8 | use stdx::{format_to, SepBy}; | 8 | use stdx::{format_to, SepBy}; |
9 | 9 | ||
@@ -77,16 +77,16 @@ pub(crate) fn add_new(ctx: AssistCtx) -> Option<Assist> { | |||
77 | .text_range() | 77 | .text_range() |
78 | .end(); | 78 | .end(); |
79 | 79 | ||
80 | Some((start, TextUnit::from_usize(1))) | 80 | Some((start, TextSize::from_usize(1))) |
81 | }) | 81 | }) |
82 | .unwrap_or_else(|| { | 82 | .unwrap_or_else(|| { |
83 | buf = generate_impl_text(&strukt, &buf); | 83 | buf = generate_impl_text(&strukt, &buf); |
84 | let start = strukt.syntax().text_range().end(); | 84 | let start = strukt.syntax().text_range().end(); |
85 | 85 | ||
86 | (start, TextUnit::from_usize(3)) | 86 | (start, TextSize::from_usize(3)) |
87 | }); | 87 | }); |
88 | 88 | ||
89 | edit.set_cursor(start_offset + TextUnit::of_str(&buf) - end_offset); | 89 | edit.set_cursor(start_offset + TextSize::of(&buf) - end_offset); |
90 | edit.insert(start_offset, buf); | 90 | edit.insert(start_offset, buf); |
91 | }) | 91 | }) |
92 | } | 92 | } |
diff --git a/crates/ra_assists/src/handlers/apply_demorgan.rs b/crates/ra_assists/src/handlers/apply_demorgan.rs
index 239807e24..260b9e073 100644
--- a/crates/ra_assists/src/handlers/apply_demorgan.rs
+++ b/crates/ra_assists/src/handlers/apply_demorgan.rs
@@ -26,7 +26,7 @@ pub(crate) fn apply_demorgan(ctx: AssistCtx) -> Option<Assist> { | |||
26 | let op = expr.op_kind()?; | 26 | let op = expr.op_kind()?; |
27 | let op_range = expr.op_token()?.text_range(); | 27 | let op_range = expr.op_token()?.text_range(); |
28 | let opposite_op = opposite_logic_op(op)?; | 28 | let opposite_op = opposite_logic_op(op)?; |
29 | let cursor_in_range = ctx.frange.range.is_subrange(&op_range); | 29 | let cursor_in_range = op_range.contains_range(ctx.frange.range); |
30 | if !cursor_in_range { | 30 | if !cursor_in_range { |
31 | return None; | 31 | return None; |
32 | } | 32 | } |
diff --git a/crates/ra_assists/src/handlers/change_visibility.rs b/crates/ra_assists/src/handlers/change_visibility.rs
index cd6d1ee6c..44f6a1dae 100644
--- a/crates/ra_assists/src/handlers/change_visibility.rs
+++ b/crates/ra_assists/src/handlers/change_visibility.rs
@@ -5,7 +5,7 @@ use ra_syntax::{ | |||
5 | ATTR, COMMENT, CONST_DEF, ENUM_DEF, FN_DEF, MODULE, STRUCT_DEF, TRAIT_DEF, VISIBILITY, | 5 | ATTR, COMMENT, CONST_DEF, ENUM_DEF, FN_DEF, MODULE, STRUCT_DEF, TRAIT_DEF, VISIBILITY, |
6 | WHITESPACE, | 6 | WHITESPACE, |
7 | }, | 7 | }, |
8 | SyntaxNode, TextUnit, T, | 8 | SyntaxNode, TextSize, T, |
9 | }; | 9 | }; |
10 | 10 | ||
11 | use crate::{Assist, AssistCtx, AssistId}; | 11 | use crate::{Assist, AssistCtx, AssistId}; |
@@ -67,7 +67,7 @@ fn add_vis(ctx: AssistCtx) -> Option<Assist> { | |||
67 | }) | 67 | }) |
68 | } | 68 | } |
69 | 69 | ||
70 | fn vis_offset(node: &SyntaxNode) -> TextUnit { | 70 | fn vis_offset(node: &SyntaxNode) -> TextSize { |
71 | node.children_with_tokens() | 71 | node.children_with_tokens() |
72 | .skip_while(|it| match it.kind() { | 72 | .skip_while(|it| match it.kind() { |
73 | WHITESPACE | COMMENT | ATTR => true, | 73 | WHITESPACE | COMMENT | ATTR => true, |
diff --git a/crates/ra_assists/src/handlers/flip_binexpr.rs b/crates/ra_assists/src/handlers/flip_binexpr.rs
index bfcc09e90..8030efb35 100644
--- a/crates/ra_assists/src/handlers/flip_binexpr.rs
+++ b/crates/ra_assists/src/handlers/flip_binexpr.rs
@@ -23,7 +23,7 @@ pub(crate) fn flip_binexpr(ctx: AssistCtx) -> Option<Assist> { | |||
23 | let rhs = expr.rhs()?.syntax().clone(); | 23 | let rhs = expr.rhs()?.syntax().clone(); |
24 | let op_range = expr.op_token()?.text_range(); | 24 | let op_range = expr.op_token()?.text_range(); |
25 | // The assist should be applied only if the cursor is on the operator | 25 | // The assist should be applied only if the cursor is on the operator |
26 | let cursor_in_range = ctx.frange.range.is_subrange(&op_range); | 26 | let cursor_in_range = op_range.contains_range(ctx.frange.range); |
27 | if !cursor_in_range { | 27 | if !cursor_in_range { |
28 | return None; | 28 | return None; |
29 | } | 29 | } |
diff --git a/crates/ra_assists/src/handlers/inline_local_variable.rs b/crates/ra_assists/src/handlers/inline_local_variable.rs
index c4fb425b0..f5702f6e0 100644
--- a/crates/ra_assists/src/handlers/inline_local_variable.rs
+++ b/crates/ra_assists/src/handlers/inline_local_variable.rs
@@ -52,7 +52,7 @@ pub(crate) fn inline_local_variable(ctx: AssistCtx) -> Option<Assist> { | |||
52 | .next_sibling_or_token() | 52 | .next_sibling_or_token() |
53 | .and_then(|it| ast::Whitespace::cast(it.as_token()?.clone())) | 53 | .and_then(|it| ast::Whitespace::cast(it.as_token()?.clone())) |
54 | { | 54 | { |
55 | TextRange::from_to( | 55 | TextRange::new( |
56 | let_stmt.syntax().text_range().start(), | 56 | let_stmt.syntax().text_range().start(), |
57 | whitespace.syntax().text_range().end(), | 57 | whitespace.syntax().text_range().end(), |
58 | ) | 58 | ) |
diff --git a/crates/ra_assists/src/handlers/introduce_variable.rs b/crates/ra_assists/src/handlers/introduce_variable.rs
index 8c09e6bcd..eda9ac296 100644
--- a/crates/ra_assists/src/handlers/introduce_variable.rs
+++ b/crates/ra_assists/src/handlers/introduce_variable.rs
@@ -4,7 +4,7 @@ use ra_syntax::{ | |||
4 | BLOCK_EXPR, BREAK_EXPR, COMMENT, LAMBDA_EXPR, LOOP_EXPR, MATCH_ARM, PATH_EXPR, RETURN_EXPR, | 4 | BLOCK_EXPR, BREAK_EXPR, COMMENT, LAMBDA_EXPR, LOOP_EXPR, MATCH_ARM, PATH_EXPR, RETURN_EXPR, |
5 | WHITESPACE, | 5 | WHITESPACE, |
6 | }, | 6 | }, |
7 | SyntaxNode, TextUnit, | 7 | SyntaxNode, TextSize, |
8 | }; | 8 | }; |
9 | use stdx::format_to; | 9 | use stdx::format_to; |
10 | use test_utils::tested_by; | 10 | use test_utils::tested_by; |
@@ -47,10 +47,10 @@ pub(crate) fn introduce_variable(ctx: AssistCtx) -> Option<Assist> { | |||
47 | 47 | ||
48 | let cursor_offset = if wrap_in_block { | 48 | let cursor_offset = if wrap_in_block { |
49 | buf.push_str("{ let var_name = "); | 49 | buf.push_str("{ let var_name = "); |
50 | TextUnit::of_str("{ let ") | 50 | TextSize::of("{ let ") |
51 | } else { | 51 | } else { |
52 | buf.push_str("let var_name = "); | 52 | buf.push_str("let var_name = "); |
53 | TextUnit::of_str("let ") | 53 | TextSize::of("let ") |
54 | }; | 54 | }; |
55 | format_to!(buf, "{}", expr.syntax()); | 55 | format_to!(buf, "{}", expr.syntax()); |
56 | let full_stmt = ast::ExprStmt::cast(anchor_stmt.clone()); | 56 | let full_stmt = ast::ExprStmt::cast(anchor_stmt.clone()); |
diff --git a/crates/ra_assists/src/handlers/invert_if.rs b/crates/ra_assists/src/handlers/invert_if.rs
index 4c5716868..682e08512 100644
--- a/crates/ra_assists/src/handlers/invert_if.rs
+++ b/crates/ra_assists/src/handlers/invert_if.rs
@@ -28,7 +28,7 @@ pub(crate) fn invert_if(ctx: AssistCtx) -> Option<Assist> { | |||
28 | let if_keyword = ctx.find_token_at_offset(T![if])?; | 28 | let if_keyword = ctx.find_token_at_offset(T![if])?; |
29 | let expr = ast::IfExpr::cast(if_keyword.parent())?; | 29 | let expr = ast::IfExpr::cast(if_keyword.parent())?; |
30 | let if_range = if_keyword.text_range(); | 30 | let if_range = if_keyword.text_range(); |
31 | let cursor_in_range = ctx.frange.range.is_subrange(&if_range); | 31 | let cursor_in_range = if_range.contains_range(ctx.frange.range); |
32 | if !cursor_in_range { | 32 | if !cursor_in_range { |
33 | return None; | 33 | return None; |
34 | } | 34 | } |
diff --git a/crates/ra_assists/src/handlers/merge_match_arms.rs b/crates/ra_assists/src/handlers/merge_match_arms.rs
index eb967ab92..cd0416f01 100644
--- a/crates/ra_assists/src/handlers/merge_match_arms.rs
+++ b/crates/ra_assists/src/handlers/merge_match_arms.rs
@@ -3,7 +3,7 @@ use std::iter::successors; | |||
3 | use ra_syntax::{ | 3 | use ra_syntax::{ |
4 | algo::neighbor, | 4 | algo::neighbor, |
5 | ast::{self, AstNode}, | 5 | ast::{self, AstNode}, |
6 | Direction, TextUnit, | 6 | Direction, TextSize, |
7 | }; | 7 | }; |
8 | 8 | ||
9 | use crate::{Assist, AssistCtx, AssistId, TextRange}; | 9 | use crate::{Assist, AssistCtx, AssistId, TextRange}; |
@@ -42,8 +42,8 @@ pub(crate) fn merge_match_arms(ctx: AssistCtx) -> Option<Assist> { | |||
42 | let current_text_range = current_arm.syntax().text_range(); | 42 | let current_text_range = current_arm.syntax().text_range(); |
43 | 43 | ||
44 | enum CursorPos { | 44 | enum CursorPos { |
45 | InExpr(TextUnit), | 45 | InExpr(TextSize), |
46 | InPat(TextUnit), | 46 | InPat(TextSize), |
47 | } | 47 | } |
48 | let cursor_pos = ctx.frange.range.start(); | 48 | let cursor_pos = ctx.frange.range.start(); |
49 | let cursor_pos = if current_expr.syntax().text_range().contains(cursor_pos) { | 49 | let cursor_pos = if current_expr.syntax().text_range().contains(cursor_pos) { |
@@ -89,10 +89,10 @@ pub(crate) fn merge_match_arms(ctx: AssistCtx) -> Option<Assist> { | |||
89 | 89 | ||
90 | edit.target(current_text_range); | 90 | edit.target(current_text_range); |
91 | edit.set_cursor(match cursor_pos { | 91 | edit.set_cursor(match cursor_pos { |
92 | CursorPos::InExpr(back_offset) => start + TextUnit::from_usize(arm.len()) - back_offset, | 92 | CursorPos::InExpr(back_offset) => start + TextSize::from_usize(arm.len()) - back_offset, |
93 | CursorPos::InPat(offset) => offset, | 93 | CursorPos::InPat(offset) => offset, |
94 | }); | 94 | }); |
95 | edit.replace(TextRange::from_to(start, end), arm); | 95 | edit.replace(TextRange::new(start, end), arm); |
96 | }) | 96 | }) |
97 | } | 97 | } |
98 | 98 | ||
diff --git a/crates/ra_assists/src/handlers/move_guard.rs b/crates/ra_assists/src/handlers/move_guard.rs
index 1cc498638..d5ccdd91c 100644
--- a/crates/ra_assists/src/handlers/move_guard.rs
+++ b/crates/ra_assists/src/handlers/move_guard.rs
@@ -1,7 +1,7 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast, | 2 | ast, |
3 | ast::{AstNode, AstToken, IfExpr, MatchArm}, | 3 | ast::{AstNode, AstToken, IfExpr, MatchArm}, |
4 | TextUnit, | 4 | TextSize, |
5 | }; | 5 | }; |
6 | 6 | ||
7 | use crate::{Assist, AssistCtx, AssistId}; | 7 | use crate::{Assist, AssistCtx, AssistId}; |
@@ -49,16 +49,16 @@ pub(crate) fn move_guard_to_arm_body(ctx: AssistCtx) -> Option<Assist> { | |||
49 | edit.delete(ele); | 49 | edit.delete(ele); |
50 | ele.len() | 50 | ele.len() |
51 | } else { | 51 | } else { |
52 | TextUnit::from(0) | 52 | TextSize::from(0) |
53 | } | 53 | } |
54 | } | 54 | } |
55 | _ => TextUnit::from(0), | 55 | _ => TextSize::from(0), |
56 | }; | 56 | }; |
57 | 57 | ||
58 | edit.delete(guard.syntax().text_range()); | 58 | edit.delete(guard.syntax().text_range()); |
59 | edit.replace_node_and_indent(arm_expr.syntax(), buf); | 59 | edit.replace_node_and_indent(arm_expr.syntax(), buf); |
60 | edit.set_cursor( | 60 | edit.set_cursor( |
61 | arm_expr.syntax().text_range().start() + TextUnit::from(3) - offseting_amount, | 61 | arm_expr.syntax().text_range().start() + TextSize::from(3) - offseting_amount, |
62 | ); | 62 | ); |
63 | }) | 63 | }) |
64 | } | 64 | } |
@@ -123,7 +123,7 @@ pub(crate) fn move_arm_cond_to_match_guard(ctx: AssistCtx) -> Option<Assist> { | |||
123 | } | 123 | } |
124 | 124 | ||
125 | edit.insert(match_pat.syntax().text_range().end(), buf); | 125 | edit.insert(match_pat.syntax().text_range().end(), buf); |
126 | edit.set_cursor(match_pat.syntax().text_range().end() + TextUnit::from(1)); | 126 | edit.set_cursor(match_pat.syntax().text_range().end() + TextSize::from(1)); |
127 | }, | 127 | }, |
128 | ) | 128 | ) |
129 | } | 129 | } |
diff --git a/crates/ra_assists/src/handlers/raw_string.rs b/crates/ra_assists/src/handlers/raw_string.rs
index 7e4b83f13..567400b9c 100644
--- a/crates/ra_assists/src/handlers/raw_string.rs
+++ b/crates/ra_assists/src/handlers/raw_string.rs
@@ -2,7 +2,7 @@ use ra_syntax::{ | |||
2 | ast::{self, HasStringValue}, | 2 | ast::{self, HasStringValue}, |
3 | AstToken, | 3 | AstToken, |
4 | SyntaxKind::{RAW_STRING, STRING}, | 4 | SyntaxKind::{RAW_STRING, STRING}, |
5 | TextUnit, | 5 | TextSize, |
6 | }; | 6 | }; |
7 | 7 | ||
8 | use crate::{Assist, AssistCtx, AssistId}; | 8 | use crate::{Assist, AssistCtx, AssistId}; |
@@ -81,7 +81,7 @@ pub(crate) fn add_hash(ctx: AssistCtx) -> Option<Assist> { | |||
81 | let token = ctx.find_token_at_offset(RAW_STRING)?; | 81 | let token = ctx.find_token_at_offset(RAW_STRING)?; |
82 | ctx.add_assist(AssistId("add_hash"), "Add # to raw string", |edit| { | 82 | ctx.add_assist(AssistId("add_hash"), "Add # to raw string", |edit| { |
83 | edit.target(token.text_range()); | 83 | edit.target(token.text_range()); |
84 | edit.insert(token.text_range().start() + TextUnit::of_char('r'), "#"); | 84 | edit.insert(token.text_range().start() + TextSize::of('r'), "#"); |
85 | edit.insert(token.text_range().end(), "#"); | 85 | edit.insert(token.text_range().end(), "#"); |
86 | }) | 86 | }) |
87 | } | 87 | } |
diff --git a/crates/ra_assists/src/handlers/remove_dbg.rs b/crates/ra_assists/src/handlers/remove_dbg.rs
index 5085649b4..4e5eb4350 100644
--- a/crates/ra_assists/src/handlers/remove_dbg.rs
+++ b/crates/ra_assists/src/handlers/remove_dbg.rs
@@ -1,6 +1,6 @@ | |||
1 | use ra_syntax::{ | 1 | use ra_syntax::{ |
2 | ast::{self, AstNode}, | 2 | ast::{self, AstNode}, |
3 | TextUnit, T, | 3 | TextSize, T, |
4 | }; | 4 | }; |
5 | 5 | ||
6 | use crate::{Assist, AssistCtx, AssistId}; | 6 | use crate::{Assist, AssistCtx, AssistId}; |
@@ -38,9 +38,9 @@ pub(crate) fn remove_dbg(ctx: AssistCtx) -> Option<Assist> { | |||
38 | let offset_start = file_range | 38 | let offset_start = file_range |
39 | .start() | 39 | .start() |
40 | .checked_sub(macro_range.start()) | 40 | .checked_sub(macro_range.start()) |
41 | .unwrap_or_else(|| TextUnit::from(0)); | 41 | .unwrap_or_else(|| TextSize::from(0)); |
42 | 42 | ||
43 | let dbg_size = TextUnit::of_str("dbg!("); | 43 | let dbg_size = TextSize::of("dbg!("); |
44 | 44 | ||
45 | if offset_start > dbg_size { | 45 | if offset_start > dbg_size { |
46 | file_range.start() - dbg_size | 46 | file_range.start() - dbg_size |
@@ -53,7 +53,7 @@ pub(crate) fn remove_dbg(ctx: AssistCtx) -> Option<Assist> { | |||
53 | let macro_args = macro_call.token_tree()?.syntax().clone(); | 53 | let macro_args = macro_call.token_tree()?.syntax().clone(); |
54 | 54 | ||
55 | let text = macro_args.text(); | 55 | let text = macro_args.text(); |
56 | let without_parens = TextUnit::of_char('(')..text.len() - TextUnit::of_char(')'); | 56 | let without_parens = TextSize::of('(')..text.len() - TextSize::of(')'); |
57 | text.slice(without_parens).to_string() | 57 | text.slice(without_parens).to_string() |
58 | }; | 58 | }; |
59 | 59 | ||
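As the hunks above suggest, the separate `TextUnit::of_str` / `TextUnit::of_char` constructors collapse into a single generic `TextSize::of`. A small sketch (assuming the text-size crate) of what it measures:

    use text_size::TextSize;

    fn main() {
        // `TextSize::of` accepts both &str and char and returns the UTF-8 length.
        assert_eq!(TextSize::of("dbg!("), TextSize::from(5));
        assert_eq!(TextSize::of('('), TextSize::from(1));
        // Multi-byte characters count by encoded length, not by chars.
        assert_eq!(TextSize::of('é'), TextSize::from(2));
    }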
diff --git a/crates/ra_assists/src/handlers/remove_mut.rs b/crates/ra_assists/src/handlers/remove_mut.rs
index 6884830eb..e598023b2 100644
--- a/crates/ra_assists/src/handlers/remove_mut.rs
+++ b/crates/ra_assists/src/handlers/remove_mut.rs
@@ -27,6 +27,6 @@ pub(crate) fn remove_mut(ctx: AssistCtx) -> Option<Assist> { | |||
27 | 27 | ||
28 | ctx.add_assist(AssistId("remove_mut"), "Remove `mut` keyword", |edit| { | 28 | ctx.add_assist(AssistId("remove_mut"), "Remove `mut` keyword", |edit| { |
29 | edit.set_cursor(delete_from); | 29 | edit.set_cursor(delete_from); |
30 | edit.delete(TextRange::from_to(delete_from, delete_to)); | 30 | edit.delete(TextRange::new(delete_from, delete_to)); |
31 | }) | 31 | }) |
32 | } | 32 | } |
diff --git a/crates/ra_assists/src/handlers/replace_qualified_name_with_use.rs b/crates/ra_assists/src/handlers/replace_qualified_name_with_use.rs
index 94f5d6c50..2f02df303 100644
--- a/crates/ra_assists/src/handlers/replace_qualified_name_with_use.rs
+++ b/crates/ra_assists/src/handlers/replace_qualified_name_with_use.rs
@@ -43,7 +43,7 @@ pub(crate) fn replace_qualified_name_with_use(ctx: AssistCtx) -> Option<Assist> | |||
43 | if let Some(last) = path.segment() { | 43 | if let Some(last) = path.segment() { |
44 | // Here we are assuming the assist will provide a correct use statement | 44 | // Here we are assuming the assist will provide a correct use statement |
45 | // so we can delete the path qualifier | 45 | // so we can delete the path qualifier |
46 | edit.delete(TextRange::from_to( | 46 | edit.delete(TextRange::new( |
47 | path.syntax().text_range().start(), | 47 | path.syntax().text_range().start(), |
48 | last.syntax().text_range().start(), | 48 | last.syntax().text_range().start(), |
49 | )); | 49 | )); |
diff --git a/crates/ra_assists/src/lib.rs b/crates/ra_assists/src/lib.rs
index ccc95735f..3ffbe4c51 100644
--- a/crates/ra_assists/src/lib.rs
+++ b/crates/ra_assists/src/lib.rs
@@ -19,7 +19,7 @@ pub mod ast_transform; | |||
19 | 19 | ||
20 | use ra_db::{FileId, FileRange}; | 20 | use ra_db::{FileId, FileRange}; |
21 | use ra_ide_db::RootDatabase; | 21 | use ra_ide_db::RootDatabase; |
22 | use ra_syntax::{TextRange, TextUnit}; | 22 | use ra_syntax::{TextRange, TextSize}; |
23 | use ra_text_edit::TextEdit; | 23 | use ra_text_edit::TextEdit; |
24 | 24 | ||
25 | pub(crate) use crate::assist_ctx::{Assist, AssistCtx, AssistHandler}; | 25 | pub(crate) use crate::assist_ctx::{Assist, AssistCtx, AssistHandler}; |
@@ -51,7 +51,7 @@ impl AssistLabel { | |||
51 | #[derive(Debug, Clone)] | 51 | #[derive(Debug, Clone)] |
52 | pub struct AssistAction { | 52 | pub struct AssistAction { |
53 | pub edit: TextEdit, | 53 | pub edit: TextEdit, |
54 | pub cursor_position: Option<TextUnit>, | 54 | pub cursor_position: Option<TextSize>, |
55 | // FIXME: This belongs to `AssistLabel` | 55 | // FIXME: This belongs to `AssistLabel` |
56 | pub target: Option<TextRange>, | 56 | pub target: Option<TextRange>, |
57 | pub file: AssistFile, | 57 | pub file: AssistFile, |
@@ -104,7 +104,7 @@ pub fn resolved_assists(db: &RootDatabase, range: FileRange) -> Vec<ResolvedAssi | |||
104 | .flat_map(|it| it.0) | 104 | .flat_map(|it| it.0) |
105 | .map(|it| it.into_resolved().unwrap()) | 105 | .map(|it| it.into_resolved().unwrap()) |
106 | .collect::<Vec<_>>(); | 106 | .collect::<Vec<_>>(); |
107 | a.sort_by_key(|it| it.action.target.map_or(TextUnit::from(!0u32), |it| it.len())); | 107 | a.sort_by_key(|it| it.action.target.map_or(TextSize::from(!0u32), |it| it.len())); |
108 | a | 108 | a |
109 | } | 109 | } |
110 | 110 | ||
diff --git a/crates/ra_db/src/lib.rs b/crates/ra_db/src/lib.rs
index a06f59c14..fd4280de2 100644
--- a/crates/ra_db/src/lib.rs
+++ b/crates/ra_db/src/lib.rs
@@ -6,7 +6,7 @@ pub mod fixture; | |||
6 | use std::{panic, sync::Arc}; | 6 | use std::{panic, sync::Arc}; |
7 | 7 | ||
8 | use ra_prof::profile; | 8 | use ra_prof::profile; |
9 | use ra_syntax::{ast, Parse, SourceFile, TextRange, TextUnit}; | 9 | use ra_syntax::{ast, Parse, SourceFile, TextRange, TextSize}; |
10 | 10 | ||
11 | pub use crate::{ | 11 | pub use crate::{ |
12 | cancellation::Canceled, | 12 | cancellation::Canceled, |
@@ -75,7 +75,7 @@ impl<T: salsa::Database> CheckCanceled for T { | |||
75 | #[derive(Clone, Copy, Debug)] | 75 | #[derive(Clone, Copy, Debug)] |
76 | pub struct FilePosition { | 76 | pub struct FilePosition { |
77 | pub file_id: FileId, | 77 | pub file_id: FileId, |
78 | pub offset: TextUnit, | 78 | pub offset: TextSize, |
79 | } | 79 | } |
80 | 80 | ||
81 | #[derive(Clone, Copy, Debug)] | 81 | #[derive(Clone, Copy, Debug)] |
diff --git a/crates/ra_hir/src/semantics.rs b/crates/ra_hir/src/semantics.rs
index 5d6edc45c..e09cf3185 100644
--- a/crates/ra_hir/src/semantics.rs
+++ b/crates/ra_hir/src/semantics.rs
@@ -14,7 +14,7 @@ use ra_db::{FileId, FileRange}; | |||
14 | use ra_prof::profile; | 14 | use ra_prof::profile; |
15 | use ra_syntax::{ | 15 | use ra_syntax::{ |
16 | algo::{find_node_at_offset, skip_trivia_token}, | 16 | algo::{find_node_at_offset, skip_trivia_token}, |
17 | ast, AstNode, Direction, SyntaxNode, SyntaxToken, TextRange, TextUnit, | 17 | ast, AstNode, Direction, SyntaxNode, SyntaxToken, TextRange, TextSize, |
18 | }; | 18 | }; |
19 | use rustc_hash::{FxHashMap, FxHashSet}; | 19 | use rustc_hash::{FxHashMap, FxHashSet}; |
20 | 20 | ||
@@ -95,7 +95,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
95 | let token = successors(Some(parent.with_value(token)), |token| { | 95 | let token = successors(Some(parent.with_value(token)), |token| { |
96 | let macro_call = token.value.ancestors().find_map(ast::MacroCall::cast)?; | 96 | let macro_call = token.value.ancestors().find_map(ast::MacroCall::cast)?; |
97 | let tt = macro_call.token_tree()?; | 97 | let tt = macro_call.token_tree()?; |
98 | if !token.value.text_range().is_subrange(&tt.syntax().text_range()) { | 98 | if !tt.syntax().text_range().contains_range(token.value.text_range()) { |
99 | return None; | 99 | return None; |
100 | } | 100 | } |
101 | let file_id = sa.expand(self.db, token.with_value(&macro_call))?; | 101 | let file_id = sa.expand(self.db, token.with_value(&macro_call))?; |
@@ -114,7 +114,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
114 | pub fn descend_node_at_offset<N: ast::AstNode>( | 114 | pub fn descend_node_at_offset<N: ast::AstNode>( |
115 | &self, | 115 | &self, |
116 | node: &SyntaxNode, | 116 | node: &SyntaxNode, |
117 | offset: TextUnit, | 117 | offset: TextSize, |
118 | ) -> Option<N> { | 118 | ) -> Option<N> { |
119 | // Handle macro token cases | 119 | // Handle macro token cases |
120 | node.token_at_offset(offset) | 120 | node.token_at_offset(offset) |
@@ -142,7 +142,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
142 | pub fn ancestors_at_offset_with_macros( | 142 | pub fn ancestors_at_offset_with_macros( |
143 | &self, | 143 | &self, |
144 | node: &SyntaxNode, | 144 | node: &SyntaxNode, |
145 | offset: TextUnit, | 145 | offset: TextSize, |
146 | ) -> impl Iterator<Item = SyntaxNode> + '_ { | 146 | ) -> impl Iterator<Item = SyntaxNode> + '_ { |
147 | node.token_at_offset(offset) | 147 | node.token_at_offset(offset) |
148 | .map(|token| self.ancestors_with_macros(token.parent())) | 148 | .map(|token| self.ancestors_with_macros(token.parent())) |
@@ -154,7 +154,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
154 | pub fn find_node_at_offset_with_macros<N: AstNode>( | 154 | pub fn find_node_at_offset_with_macros<N: AstNode>( |
155 | &self, | 155 | &self, |
156 | node: &SyntaxNode, | 156 | node: &SyntaxNode, |
157 | offset: TextUnit, | 157 | offset: TextSize, |
158 | ) -> Option<N> { | 158 | ) -> Option<N> { |
159 | self.ancestors_at_offset_with_macros(node, offset).find_map(N::cast) | 159 | self.ancestors_at_offset_with_macros(node, offset).find_map(N::cast) |
160 | } | 160 | } |
@@ -164,7 +164,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
164 | pub fn find_node_at_offset_with_descend<N: AstNode>( | 164 | pub fn find_node_at_offset_with_descend<N: AstNode>( |
165 | &self, | 165 | &self, |
166 | node: &SyntaxNode, | 166 | node: &SyntaxNode, |
167 | offset: TextUnit, | 167 | offset: TextSize, |
168 | ) -> Option<N> { | 168 | ) -> Option<N> { |
169 | if let Some(it) = find_node_at_offset(&node, offset) { | 169 | if let Some(it) = find_node_at_offset(&node, offset) { |
170 | return Some(it); | 170 | return Some(it); |
@@ -255,7 +255,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
255 | SemanticsScope { db: self.db, resolver } | 255 | SemanticsScope { db: self.db, resolver } |
256 | } | 256 | } |
257 | 257 | ||
258 | pub fn scope_at_offset(&self, node: &SyntaxNode, offset: TextUnit) -> SemanticsScope<'db, DB> { | 258 | pub fn scope_at_offset(&self, node: &SyntaxNode, offset: TextSize) -> SemanticsScope<'db, DB> { |
259 | let node = self.find_file(node.clone()); | 259 | let node = self.find_file(node.clone()); |
260 | let resolver = self.analyze2(node.as_ref(), Some(offset)).resolver; | 260 | let resolver = self.analyze2(node.as_ref(), Some(offset)).resolver; |
261 | SemanticsScope { db: self.db, resolver } | 261 | SemanticsScope { db: self.db, resolver } |
@@ -271,7 +271,7 @@ impl<'db, DB: HirDatabase> Semantics<'db, DB> { | |||
271 | self.analyze2(src.as_ref(), None) | 271 | self.analyze2(src.as_ref(), None) |
272 | } | 272 | } |
273 | 273 | ||
274 | fn analyze2(&self, src: InFile<&SyntaxNode>, offset: Option<TextUnit>) -> SourceAnalyzer { | 274 | fn analyze2(&self, src: InFile<&SyntaxNode>, offset: Option<TextSize>) -> SourceAnalyzer { |
275 | let _p = profile("Semantics::analyze2"); | 275 | let _p = profile("Semantics::analyze2"); |
276 | 276 | ||
277 | let container = match self.with_ctx(|ctx| ctx.find_container(src)) { | 277 | let container = match self.with_ctx(|ctx| ctx.find_container(src)) { |
@@ -463,7 +463,7 @@ fn original_range_opt( | |||
463 | return None; | 463 | return None; |
464 | } | 464 | } |
465 | 465 | ||
466 | Some(first.with_value(first.value.text_range().extend_to(&last.value.text_range()))) | 466 | Some(first.with_value(first.value.text_range().cover(last.value.text_range()))) |
467 | })?) | 467 | })?) |
468 | } | 468 | } |
469 | 469 | ||
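For reference, `extend_to` is renamed to `cover` in the new range API; a minimal sketch (assuming the text-size crate) of the `first.value.text_range().cover(...)` call in the hunk above:

    use text_size::{TextRange, TextSize};

    fn main() {
        let first = TextRange::new(TextSize::from(5), TextSize::from(8));
        let last = TextRange::new(TextSize::from(12), TextSize::from(20));
        // `cover` returns the smallest range containing both inputs,
        // like the old `first.extend_to(&last)`.
        assert_eq!(first.cover(last), TextRange::new(TextSize::from(5), TextSize::from(20)));
    }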
diff --git a/crates/ra_hir/src/source_analyzer.rs b/crates/ra_hir/src/source_analyzer.rs
index 0ed6d0958..59a3a17d2 100644
--- a/crates/ra_hir/src/source_analyzer.rs
+++ b/crates/ra_hir/src/source_analyzer.rs
@@ -23,7 +23,7 @@ use hir_ty::{ | |||
23 | }; | 23 | }; |
24 | use ra_syntax::{ | 24 | use ra_syntax::{ |
25 | ast::{self, AstNode}, | 25 | ast::{self, AstNode}, |
26 | SyntaxNode, TextRange, TextUnit, | 26 | SyntaxNode, TextRange, TextSize, |
27 | }; | 27 | }; |
28 | 28 | ||
29 | use crate::{ | 29 | use crate::{ |
@@ -50,7 +50,7 @@ impl SourceAnalyzer { | |||
50 | db: &dyn HirDatabase, | 50 | db: &dyn HirDatabase, |
51 | def: DefWithBodyId, | 51 | def: DefWithBodyId, |
52 | node: InFile<&SyntaxNode>, | 52 | node: InFile<&SyntaxNode>, |
53 | offset: Option<TextUnit>, | 53 | offset: Option<TextSize>, |
54 | ) -> SourceAnalyzer { | 54 | ) -> SourceAnalyzer { |
55 | let (body, source_map) = db.body_with_source_map(def); | 55 | let (body, source_map) = db.body_with_source_map(def); |
56 | let scopes = db.expr_scopes(def); | 56 | let scopes = db.expr_scopes(def); |
@@ -318,7 +318,7 @@ fn scope_for_offset( | |||
318 | db: &dyn HirDatabase, | 318 | db: &dyn HirDatabase, |
319 | scopes: &ExprScopes, | 319 | scopes: &ExprScopes, |
320 | source_map: &BodySourceMap, | 320 | source_map: &BodySourceMap, |
321 | offset: InFile<TextUnit>, | 321 | offset: InFile<TextSize>, |
322 | ) -> Option<ScopeId> { | 322 | ) -> Option<ScopeId> { |
323 | scopes | 323 | scopes |
324 | .scope_by_expr() | 324 | .scope_by_expr() |
@@ -354,7 +354,7 @@ fn adjust( | |||
354 | source_map: &BodySourceMap, | 354 | source_map: &BodySourceMap, |
355 | expr_range: TextRange, | 355 | expr_range: TextRange, |
356 | file_id: HirFileId, | 356 | file_id: HirFileId, |
357 | offset: TextUnit, | 357 | offset: TextSize, |
358 | ) -> Option<ScopeId> { | 358 | ) -> Option<ScopeId> { |
359 | let child_scopes = scopes | 359 | let child_scopes = scopes |
360 | .scope_by_expr() | 360 | .scope_by_expr() |
@@ -369,15 +369,15 @@ fn adjust( | |||
369 | let node = source.value.to_node(&root); | 369 | let node = source.value.to_node(&root); |
370 | Some((node.syntax().text_range(), scope)) | 370 | Some((node.syntax().text_range(), scope)) |
371 | }) | 371 | }) |
372 | .filter(|(range, _)| { | 372 | .filter(|&(range, _)| { |
373 | range.start() <= offset && range.is_subrange(&expr_range) && *range != expr_range | 373 | range.start() <= offset && expr_range.contains_range(range) && range != expr_range |
374 | }); | 374 | }); |
375 | 375 | ||
376 | child_scopes | 376 | child_scopes |
377 | .max_by(|(r1, _), (r2, _)| { | 377 | .max_by(|&(r1, _), &(r2, _)| { |
378 | if r2.is_subrange(&r1) { | 378 | if r1.contains_range(r2) { |
379 | std::cmp::Ordering::Greater | 379 | std::cmp::Ordering::Greater |
380 | } else if r1.is_subrange(&r2) { | 380 | } else if r2.contains_range(r1) { |
381 | std::cmp::Ordering::Less | 381 | std::cmp::Ordering::Less |
382 | } else { | 382 | } else { |
383 | r1.start().cmp(&r2.start()) | 383 | r1.start().cmp(&r2.start()) |
diff --git a/crates/ra_hir_expand/src/builtin_macro.rs b/crates/ra_hir_expand/src/builtin_macro.rs
index 3da137f2e..2ccebda28 100644
--- a/crates/ra_hir_expand/src/builtin_macro.rs
+++ b/crates/ra_hir_expand/src/builtin_macro.rs
@@ -2,7 +2,7 @@ | |||
2 | use crate::db::AstDatabase; | 2 | use crate::db::AstDatabase; |
3 | use crate::{ | 3 | use crate::{ |
4 | ast::{self, AstToken, HasStringValue}, | 4 | ast::{self, AstToken, HasStringValue}, |
5 | name, AstId, CrateId, MacroDefId, MacroDefKind, TextUnit, | 5 | name, AstId, CrateId, MacroDefId, MacroDefKind, TextSize, |
6 | }; | 6 | }; |
7 | 7 | ||
8 | use crate::{quote, EagerMacroId, LazyMacroId, MacroCallId}; | 8 | use crate::{quote, EagerMacroId, LazyMacroId, MacroCallId}; |
@@ -127,7 +127,7 @@ fn stringify_expand( | |||
127 | let arg = loc.kind.arg(db).ok_or_else(|| mbe::ExpandError::UnexpectedToken)?; | 127 | let arg = loc.kind.arg(db).ok_or_else(|| mbe::ExpandError::UnexpectedToken)?; |
128 | let macro_args = arg; | 128 | let macro_args = arg; |
129 | let text = macro_args.text(); | 129 | let text = macro_args.text(); |
130 | let without_parens = TextUnit::of_char('(')..text.len() - TextUnit::of_char(')'); | 130 | let without_parens = TextSize::of('(')..text.len() - TextSize::of(')'); |
131 | text.slice(without_parens).to_string() | 131 | text.slice(without_parens).to_string() |
132 | }; | 132 | }; |
133 | 133 | ||
diff --git a/crates/ra_hir_expand/src/lib.rs b/crates/ra_hir_expand/src/lib.rs
index 86299459f..754a0f005 100644
--- a/crates/ra_hir_expand/src/lib.rs
+++ b/crates/ra_hir_expand/src/lib.rs
@@ -22,7 +22,7 @@ use ra_db::{impl_intern_key, salsa, CrateId, FileId}; | |||
22 | use ra_syntax::{ | 22 | use ra_syntax::{ |
23 | algo, | 23 | algo, |
24 | ast::{self, AstNode}, | 24 | ast::{self, AstNode}, |
25 | SyntaxNode, SyntaxToken, TextUnit, | 25 | SyntaxNode, SyntaxToken, TextSize, |
26 | }; | 26 | }; |
27 | 27 | ||
28 | use crate::ast_id_map::FileAstId; | 28 | use crate::ast_id_map::FileAstId; |
@@ -348,7 +348,7 @@ impl<N: AstNode> AstId<N> { | |||
348 | /// | 348 | /// |
349 | /// * `InFile<SyntaxNode>` -- syntax node in a file | 349 | /// * `InFile<SyntaxNode>` -- syntax node in a file |
350 | /// * `InFile<ast::FnDef>` -- ast node in a file | 350 | /// * `InFile<ast::FnDef>` -- ast node in a file |
351 | /// * `InFile<TextUnit>` -- offset in a file | 351 | /// * `InFile<TextSize>` -- offset in a file |
352 | #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] | 352 | #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] |
353 | pub struct InFile<T> { | 353 | pub struct InFile<T> { |
354 | pub file_id: HirFileId, | 354 | pub file_id: HirFileId, |
diff --git a/crates/ra_hir_ty/src/tests.rs b/crates/ra_hir_ty/src/tests.rs
index 846005baa..b6a96bb5c 100644
--- a/crates/ra_hir_ty/src/tests.rs
+++ b/crates/ra_hir_ty/src/tests.rs
@@ -117,7 +117,7 @@ fn infer_with_mismatches(content: &str, include_mismatches: bool) -> String { | |||
117 | let macro_prefix = if node.file_id != file_id.into() { "!" } else { "" }; | 117 | let macro_prefix = if node.file_id != file_id.into() { "!" } else { "" }; |
118 | format_to!( | 118 | format_to!( |
119 | buf, | 119 | buf, |
120 | "{}{} '{}': {}\n", | 120 | "{}{:?} '{}': {}\n", |
121 | macro_prefix, | 121 | macro_prefix, |
122 | range, | 122 | range, |
123 | ellipsize(text, 15), | 123 | ellipsize(text, 15), |
@@ -134,7 +134,7 @@ fn infer_with_mismatches(content: &str, include_mismatches: bool) -> String { | |||
134 | let macro_prefix = if src_ptr.file_id != file_id.into() { "!" } else { "" }; | 134 | let macro_prefix = if src_ptr.file_id != file_id.into() { "!" } else { "" }; |
135 | format_to!( | 135 | format_to!( |
136 | buf, | 136 | buf, |
137 | "{}{}: expected {}, got {}\n", | 137 | "{}{:?}: expected {}, got {}\n", |
138 | macro_prefix, | 138 | macro_prefix, |
139 | range, | 139 | range, |
140 | mismatch.expected.display(&db), | 140 | mismatch.expected.display(&db), |
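The `{}` → `{:?}` switch above suggests the new range type is formatted via `Debug` rather than `Display`; a sketch under that assumption (using the text-size crate):

    use text_size::{TextRange, TextSize};

    fn main() {
        let range = TextRange::new(TextSize::from(10), TextSize::from(14));
        // `format!("{}", range)` no longer compiles; Debug prints the range
        // as `start..end`.
        println!("{:?}", range);
    }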
diff --git a/crates/ra_ide/src/call_info.rs b/crates/ra_ide/src/call_info.rs
index 5da254a6e..780a03c13 100644
--- a/crates/ra_ide/src/call_info.rs
+++ b/crates/ra_ide/src/call_info.rs
@@ -126,7 +126,7 @@ impl FnCallNode { | |||
126 | ast::CallExpr(it) => Some(FnCallNode::CallExpr(it)), | 126 | ast::CallExpr(it) => Some(FnCallNode::CallExpr(it)), |
127 | ast::MethodCallExpr(it) => { | 127 | ast::MethodCallExpr(it) => { |
128 | let arg_list = it.arg_list()?; | 128 | let arg_list = it.arg_list()?; |
129 | if !syntax.text_range().is_subrange(&arg_list.syntax().text_range()) { | 129 | if !arg_list.syntax().text_range().contains_range(syntax.text_range()) { |
130 | return None; | 130 | return None; |
131 | } | 131 | } |
132 | Some(FnCallNode::MethodCallExpr(it)) | 132 | Some(FnCallNode::MethodCallExpr(it)) |
diff --git a/crates/ra_ide/src/completion/complete_keyword.rs b/crates/ra_ide/src/completion/complete_keyword.rs
index adefb290e..306ce96dc 100644
--- a/crates/ra_ide/src/completion/complete_keyword.rs
+++ b/crates/ra_ide/src/completion/complete_keyword.rs
@@ -97,7 +97,7 @@ fn is_in_loop_body(leaf: &SyntaxToken) -> bool { | |||
97 | } | 97 | } |
98 | }; | 98 | }; |
99 | if let Some(body) = loop_body { | 99 | if let Some(body) = loop_body { |
100 | if leaf.text_range().is_subrange(&body.syntax().text_range()) { | 100 | if body.syntax().text_range().contains_range(leaf.text_range()) { |
101 | return true; | 101 | return true; |
102 | } | 102 | } |
103 | } | 103 | } |
diff --git a/crates/ra_ide/src/completion/complete_postfix.rs b/crates/ra_ide/src/completion/complete_postfix.rs
index 8d397b0fe..d6a37d720 100644
--- a/crates/ra_ide/src/completion/complete_postfix.rs
+++ b/crates/ra_ide/src/completion/complete_postfix.rs
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | use ra_syntax::{ | 3 | use ra_syntax::{ |
4 | ast::{self, AstNode}, | 4 | ast::{self, AstNode}, |
5 | TextRange, TextUnit, | 5 | TextRange, TextSize, |
6 | }; | 6 | }; |
7 | use ra_text_edit::TextEdit; | 7 | use ra_text_edit::TextEdit; |
8 | 8 | ||
@@ -115,7 +115,7 @@ pub(super) fn complete_postfix(acc: &mut Completions, ctx: &CompletionContext) { | |||
115 | fn get_receiver_text(receiver: &ast::Expr, receiver_is_ambiguous_float_literal: bool) -> String { | 115 | fn get_receiver_text(receiver: &ast::Expr, receiver_is_ambiguous_float_literal: bool) -> String { |
116 | if receiver_is_ambiguous_float_literal { | 116 | if receiver_is_ambiguous_float_literal { |
117 | let text = receiver.syntax().text(); | 117 | let text = receiver.syntax().text(); |
118 | let without_dot = ..text.len() - TextUnit::of_char('.'); | 118 | let without_dot = ..text.len() - TextSize::of('.'); |
119 | text.slice(without_dot).to_string() | 119 | text.slice(without_dot).to_string() |
120 | } else { | 120 | } else { |
121 | receiver.to_string() | 121 | receiver.to_string() |
@@ -143,7 +143,7 @@ fn postfix_snippet( | |||
143 | let edit = { | 143 | let edit = { |
144 | let receiver_syntax = receiver.syntax(); | 144 | let receiver_syntax = receiver.syntax(); |
145 | let receiver_range = ctx.sema.original_range(receiver_syntax).range; | 145 | let receiver_range = ctx.sema.original_range(receiver_syntax).range; |
146 | let delete_range = TextRange::from_to(receiver_range.start(), ctx.source_range().end()); | 146 | let delete_range = TextRange::new(receiver_range.start(), ctx.source_range().end()); |
147 | TextEdit::replace(delete_range, snippet.to_string()) | 147 | TextEdit::replace(delete_range, snippet.to_string()) |
148 | }; | 148 | }; |
149 | CompletionItem::new(CompletionKind::Postfix, ctx.source_range(), label) | 149 | CompletionItem::new(CompletionKind::Postfix, ctx.source_range(), label) |
diff --git a/crates/ra_ide/src/completion/complete_trait_impl.rs b/crates/ra_ide/src/completion/complete_trait_impl.rs
index c39943252..e2a8c59cd 100644
--- a/crates/ra_ide/src/completion/complete_trait_impl.rs
+++ b/crates/ra_ide/src/completion/complete_trait_impl.rs
@@ -141,7 +141,7 @@ fn add_function_impl( | |||
141 | } else { | 141 | } else { |
142 | CompletionItemKind::Function | 142 | CompletionItemKind::Function |
143 | }; | 143 | }; |
144 | let range = TextRange::from_to(fn_def_node.text_range().start(), ctx.source_range().end()); | 144 | let range = TextRange::new(fn_def_node.text_range().start(), ctx.source_range().end()); |
145 | 145 | ||
146 | match ctx.config.snippet_cap { | 146 | match ctx.config.snippet_cap { |
147 | Some(cap) => { | 147 | Some(cap) => { |
@@ -167,7 +167,7 @@ fn add_type_alias_impl( | |||
167 | 167 | ||
168 | let snippet = format!("type {} = ", alias_name); | 168 | let snippet = format!("type {} = ", alias_name); |
169 | 169 | ||
170 | let range = TextRange::from_to(type_def_node.text_range().start(), ctx.source_range().end()); | 170 | let range = TextRange::new(type_def_node.text_range().start(), ctx.source_range().end()); |
171 | 171 | ||
172 | CompletionItem::new(CompletionKind::Magic, ctx.source_range(), snippet.clone()) | 172 | CompletionItem::new(CompletionKind::Magic, ctx.source_range(), snippet.clone()) |
173 | .text_edit(TextEdit::replace(range, snippet)) | 173 | .text_edit(TextEdit::replace(range, snippet)) |
@@ -189,7 +189,7 @@ fn add_const_impl( | |||
189 | let snippet = make_const_compl_syntax(&const_.source(ctx.db).value); | 189 | let snippet = make_const_compl_syntax(&const_.source(ctx.db).value); |
190 | 190 | ||
191 | let range = | 191 | let range = |
192 | TextRange::from_to(const_def_node.text_range().start(), ctx.source_range().end()); | 192 | TextRange::new(const_def_node.text_range().start(), ctx.source_range().end()); |
193 | 193 | ||
194 | CompletionItem::new(CompletionKind::Magic, ctx.source_range(), snippet.clone()) | 194 | CompletionItem::new(CompletionKind::Magic, ctx.source_range(), snippet.clone()) |
195 | .text_edit(TextEdit::replace(range, snippet)) | 195 | .text_edit(TextEdit::replace(range, snippet)) |
@@ -216,7 +216,7 @@ fn make_const_compl_syntax(const_: &ast::ConstDef) -> String { | |||
216 | .map_or(const_end, |f| f.text_range().start()); | 216 | .map_or(const_end, |f| f.text_range().start()); |
217 | 217 | ||
218 | let len = end - start; | 218 | let len = end - start; |
219 | let range = TextRange::from_to(0.into(), len); | 219 | let range = TextRange::new(0.into(), len); |
220 | 220 | ||
221 | let syntax = const_.syntax().text().slice(range).to_string(); | 221 | let syntax = const_.syntax().text().slice(range).to_string(); |
222 | 222 | ||
diff --git a/crates/ra_ide/src/completion/completion_context.rs b/crates/ra_ide/src/completion/completion_context.rs
index 37880448a..5f2797e41 100644
--- a/crates/ra_ide/src/completion/completion_context.rs
+++ b/crates/ra_ide/src/completion/completion_context.rs
@@ -7,7 +7,7 @@ use ra_syntax::{ | |||
7 | algo::{find_covering_element, find_node_at_offset}, | 7 | algo::{find_covering_element, find_node_at_offset}, |
8 | ast, AstNode, | 8 | ast, AstNode, |
9 | SyntaxKind::*, | 9 | SyntaxKind::*, |
10 | SyntaxNode, SyntaxToken, TextRange, TextUnit, | 10 | SyntaxNode, SyntaxToken, TextRange, TextSize, |
11 | }; | 11 | }; |
12 | use ra_text_edit::AtomTextEdit; | 12 | use ra_text_edit::AtomTextEdit; |
13 | 13 | ||
@@ -20,7 +20,7 @@ pub(crate) struct CompletionContext<'a> { | |||
20 | pub(super) sema: Semantics<'a, RootDatabase>, | 20 | pub(super) sema: Semantics<'a, RootDatabase>, |
21 | pub(super) db: &'a RootDatabase, | 21 | pub(super) db: &'a RootDatabase, |
22 | pub(super) config: &'a CompletionConfig, | 22 | pub(super) config: &'a CompletionConfig, |
23 | pub(super) offset: TextUnit, | 23 | pub(super) offset: TextSize, |
24 | /// The token before the cursor, in the original file. | 24 | /// The token before the cursor, in the original file. |
25 | pub(super) original_token: SyntaxToken, | 25 | pub(super) original_token: SyntaxToken, |
26 | /// The token before the cursor, in the macro-expanded file. | 26 | /// The token before the cursor, in the macro-expanded file. |
@@ -167,7 +167,7 @@ impl<'a> CompletionContext<'a> { | |||
167 | match self.token.kind() { | 167 | match self.token.kind() { |
168 | // workaroud when completion is triggered by trigger characters. | 168 | // workaroud when completion is triggered by trigger characters. |
169 | IDENT => self.original_token.text_range(), | 169 | IDENT => self.original_token.text_range(), |
170 | _ => TextRange::offset_len(self.offset, 0.into()), | 170 | _ => TextRange::empty(self.offset), |
171 | } | 171 | } |
172 | } | 172 | } |
173 | 173 | ||
@@ -190,7 +190,7 @@ impl<'a> CompletionContext<'a> { | |||
190 | &mut self, | 190 | &mut self, |
191 | original_file: &SyntaxNode, | 191 | original_file: &SyntaxNode, |
192 | file_with_fake_ident: SyntaxNode, | 192 | file_with_fake_ident: SyntaxNode, |
193 | offset: TextUnit, | 193 | offset: TextSize, |
194 | ) { | 194 | ) { |
195 | // First, let's try to complete a reference to some declaration. | 195 | // First, let's try to complete a reference to some declaration. |
196 | if let Some(name_ref) = find_node_at_offset::<ast::NameRef>(&file_with_fake_ident, offset) { | 196 | if let Some(name_ref) = find_node_at_offset::<ast::NameRef>(&file_with_fake_ident, offset) { |
@@ -224,7 +224,8 @@ impl<'a> CompletionContext<'a> { | |||
224 | } | 224 | } |
225 | if let Some(let_stmt) = bind_pat.syntax().ancestors().find_map(ast::LetStmt::cast) { | 225 | if let Some(let_stmt) = bind_pat.syntax().ancestors().find_map(ast::LetStmt::cast) { |
226 | if let Some(pat) = let_stmt.pat() { | 226 | if let Some(pat) = let_stmt.pat() { |
227 | if bind_pat.syntax().text_range().is_subrange(&pat.syntax().text_range()) { | 227 | if pat.syntax().text_range().contains_range(bind_pat.syntax().text_range()) |
228 | { | ||
228 | self.is_pat_binding_or_const = false; | 229 | self.is_pat_binding_or_const = false; |
229 | } | 230 | } |
230 | } | 231 | } |
@@ -246,7 +247,7 @@ impl<'a> CompletionContext<'a> { | |||
246 | &mut self, | 247 | &mut self, |
247 | original_file: &SyntaxNode, | 248 | original_file: &SyntaxNode, |
248 | name_ref: ast::NameRef, | 249 | name_ref: ast::NameRef, |
249 | offset: TextUnit, | 250 | offset: TextSize, |
250 | ) { | 251 | ) { |
251 | self.name_ref_syntax = | 252 | self.name_ref_syntax = |
252 | find_node_at_offset(&original_file, name_ref.syntax().text_range().start()); | 253 | find_node_at_offset(&original_file, name_ref.syntax().text_range().start()); |
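One more rename visible above: a zero-length range at an offset is now `TextRange::empty(offset)` instead of `TextRange::offset_len(offset, 0.into())`. A sketch (assuming the text-size crate):

    use text_size::{TextRange, TextSize};

    fn main() {
        let offset = TextSize::from(42);
        let source_range = TextRange::empty(offset);
        // An empty range starts and ends at the given offset.
        assert!(source_range.is_empty());
        assert_eq!(source_range.start(), offset);
        assert_eq!(source_range.len(), TextSize::from(0));
    }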
diff --git a/crates/ra_ide/src/diagnostics.rs b/crates/ra_ide/src/diagnostics.rs
index e7e201709..adfb1b9b2 100644
--- a/crates/ra_ide/src/diagnostics.rs
+++ b/crates/ra_ide/src/diagnostics.rs
@@ -171,7 +171,7 @@ fn text_edit_for_remove_unnecessary_braces_with_self_in_use_statement( | |||
171 | if single_use_tree.path()?.segment()?.syntax().first_child_or_token()?.kind() == T![self] { | 171 | if single_use_tree.path()?.segment()?.syntax().first_child_or_token()?.kind() == T![self] { |
172 | let start = use_tree_list_node.prev_sibling_or_token()?.text_range().start(); | 172 | let start = use_tree_list_node.prev_sibling_or_token()?.text_range().start(); |
173 | let end = use_tree_list_node.text_range().end(); | 173 | let end = use_tree_list_node.text_range().end(); |
174 | let range = TextRange::from_to(start, end); | 174 | let range = TextRange::new(start, end); |
175 | return Some(TextEdit::delete(range)); | 175 | return Some(TextEdit::delete(range)); |
176 | } | 176 | } |
177 | None | 177 | None |
diff --git a/crates/ra_ide/src/extend_selection.rs b/crates/ra_ide/src/extend_selection.rs
index 753d2ef6a..9f329b5d3 100644
--- a/crates/ra_ide/src/extend_selection.rs
+++ b/crates/ra_ide/src/extend_selection.rs
@@ -9,7 +9,7 @@ use ra_syntax::{ | |||
9 | ast::{self, AstNode, AstToken}, | 9 | ast::{self, AstNode, AstToken}, |
10 | Direction, NodeOrToken, | 10 | Direction, NodeOrToken, |
11 | SyntaxKind::{self, *}, | 11 | SyntaxKind::{self, *}, |
12 | SyntaxNode, SyntaxToken, TextRange, TextUnit, TokenAtOffset, T, | 12 | SyntaxNode, SyntaxToken, TextRange, TextSize, TokenAtOffset, T, |
13 | }; | 13 | }; |
14 | 14 | ||
15 | use crate::FileRange; | 15 | use crate::FileRange; |
@@ -121,10 +121,10 @@ fn extend_tokens_from_range( | |||
121 | let mut first_token = skip_trivia_token(first_token, Direction::Next)?; | 121 | let mut first_token = skip_trivia_token(first_token, Direction::Next)?; |
122 | let mut last_token = skip_trivia_token(last_token, Direction::Prev)?; | 122 | let mut last_token = skip_trivia_token(last_token, Direction::Prev)?; |
123 | 123 | ||
124 | while !first_token.text_range().is_subrange(&original_range) { | 124 | while !original_range.contains_range(first_token.text_range()) { |
125 | first_token = skip_trivia_token(first_token.next_token()?, Direction::Next)?; | 125 | first_token = skip_trivia_token(first_token.next_token()?, Direction::Next)?; |
126 | } | 126 | } |
127 | while !last_token.text_range().is_subrange(&original_range) { | 127 | while !original_range.contains_range(last_token.text_range()) { |
128 | last_token = skip_trivia_token(last_token.prev_token()?, Direction::Prev)?; | 128 | last_token = skip_trivia_token(last_token.prev_token()?, Direction::Prev)?; |
129 | } | 129 | } |
130 | 130 | ||
@@ -161,8 +161,8 @@ fn extend_tokens_from_range( | |||
161 | .take_while(validate) | 161 | .take_while(validate) |
162 | .last()?; | 162 | .last()?; |
163 | 163 | ||
164 | let range = first.text_range().extend_to(&last.text_range()); | 164 | let range = first.text_range().cover(last.text_range()); |
165 | if original_range.is_subrange(&range) && original_range != range { | 165 | if range.contains_range(original_range) && original_range != range { |
166 | Some(range) | 166 | Some(range) |
167 | } else { | 167 | } else { |
168 | None | 168 | None |
@@ -176,7 +176,7 @@ fn shallowest_node(node: &SyntaxNode) -> SyntaxNode { | |||
176 | 176 | ||
177 | fn extend_single_word_in_comment_or_string( | 177 | fn extend_single_word_in_comment_or_string( |
178 | leaf: &SyntaxToken, | 178 | leaf: &SyntaxToken, |
179 | offset: TextUnit, | 179 | offset: TextSize, |
180 | ) -> Option<TextRange> { | 180 | ) -> Option<TextRange> { |
181 | let text: &str = leaf.text(); | 181 | let text: &str = leaf.text(); |
182 | let cursor_position: u32 = (offset - leaf.text_range().start()).into(); | 182 | let cursor_position: u32 = (offset - leaf.text_range().start()).into(); |
@@ -190,10 +190,10 @@ fn extend_single_word_in_comment_or_string( | |||
190 | let start_idx = before.rfind(non_word_char)? as u32; | 190 | let start_idx = before.rfind(non_word_char)? as u32; |
191 | let end_idx = after.find(non_word_char).unwrap_or_else(|| after.len()) as u32; | 191 | let end_idx = after.find(non_word_char).unwrap_or_else(|| after.len()) as u32; |
192 | 192 | ||
193 | let from: TextUnit = (start_idx + 1).into(); | 193 | let from: TextSize = (start_idx + 1).into(); |
194 | let to: TextUnit = (cursor_position + end_idx).into(); | 194 | let to: TextSize = (cursor_position + end_idx).into(); |
195 | 195 | ||
196 | let range = TextRange::from_to(from, to); | 196 | let range = TextRange::new(from, to); |
197 | if range.is_empty() { | 197 | if range.is_empty() { |
198 | None | 198 | None |
199 | } else { | 199 | } else { |
@@ -201,24 +201,24 @@ fn extend_single_word_in_comment_or_string( | |||
201 | } | 201 | } |
202 | } | 202 | } |
203 | 203 | ||
204 | fn extend_ws(root: &SyntaxNode, ws: SyntaxToken, offset: TextUnit) -> TextRange { | 204 | fn extend_ws(root: &SyntaxNode, ws: SyntaxToken, offset: TextSize) -> TextRange { |
205 | let ws_text = ws.text(); | 205 | let ws_text = ws.text(); |
206 | let suffix = TextRange::from_to(offset, ws.text_range().end()) - ws.text_range().start(); | 206 | let suffix = TextRange::new(offset, ws.text_range().end()) - ws.text_range().start(); |
207 | let prefix = TextRange::from_to(ws.text_range().start(), offset) - ws.text_range().start(); | 207 | let prefix = TextRange::new(ws.text_range().start(), offset) - ws.text_range().start(); |
208 | let ws_suffix = &ws_text.as_str()[suffix]; | 208 | let ws_suffix = &ws_text.as_str()[suffix]; |
209 | let ws_prefix = &ws_text.as_str()[prefix]; | 209 | let ws_prefix = &ws_text.as_str()[prefix]; |
210 | if ws_text.contains('\n') && !ws_suffix.contains('\n') { | 210 | if ws_text.contains('\n') && !ws_suffix.contains('\n') { |
211 | if let Some(node) = ws.next_sibling_or_token() { | 211 | if let Some(node) = ws.next_sibling_or_token() { |
212 | let start = match ws_prefix.rfind('\n') { | 212 | let start = match ws_prefix.rfind('\n') { |
213 | Some(idx) => ws.text_range().start() + TextUnit::from((idx + 1) as u32), | 213 | Some(idx) => ws.text_range().start() + TextSize::from((idx + 1) as u32), |
214 | None => node.text_range().start(), | 214 | None => node.text_range().start(), |
215 | }; | 215 | }; |
216 | let end = if root.text().char_at(node.text_range().end()) == Some('\n') { | 216 | let end = if root.text().char_at(node.text_range().end()) == Some('\n') { |
217 | node.text_range().end() + TextUnit::of_char('\n') | 217 | node.text_range().end() + TextSize::of('\n') |
218 | } else { | 218 | } else { |
219 | node.text_range().end() | 219 | node.text_range().end() |
220 | }; | 220 | }; |
221 | return TextRange::from_to(start, end); | 221 | return TextRange::new(start, end); |
222 | } | 222 | } |
223 | } | 223 | } |
224 | ws.text_range() | 224 | ws.text_range() |
@@ -270,13 +270,10 @@ fn extend_list_item(node: &SyntaxNode) -> Option<TextRange> { | |||
270 | .filter(|node| is_single_line_ws(node)) | 270 | .filter(|node| is_single_line_ws(node)) |
271 | .unwrap_or(delimiter_node); | 271 | .unwrap_or(delimiter_node); |
272 | 272 | ||
273 | return Some(TextRange::from_to(node.text_range().start(), final_node.text_range().end())); | 273 | return Some(TextRange::new(node.text_range().start(), final_node.text_range().end())); |
274 | } | 274 | } |
275 | if let Some(delimiter_node) = nearby_delimiter(delimiter, node, Direction::Prev) { | 275 | if let Some(delimiter_node) = nearby_delimiter(delimiter, node, Direction::Prev) { |
276 | return Some(TextRange::from_to( | 276 | return Some(TextRange::new(delimiter_node.text_range().start(), node.text_range().end())); |
277 | delimiter_node.text_range().start(), | ||
278 | node.text_range().end(), | ||
279 | )); | ||
280 | } | 277 | } |
281 | 278 | ||
282 | None | 279 | None |
@@ -286,10 +283,7 @@ fn extend_comments(comment: ast::Comment) -> Option<TextRange> { | |||
286 | let prev = adj_comments(&comment, Direction::Prev); | 283 | let prev = adj_comments(&comment, Direction::Prev); |
287 | let next = adj_comments(&comment, Direction::Next); | 284 | let next = adj_comments(&comment, Direction::Next); |
288 | if prev != next { | 285 | if prev != next { |
289 | Some(TextRange::from_to( | 286 | Some(TextRange::new(prev.syntax().text_range().start(), next.syntax().text_range().end())) |
290 | prev.syntax().text_range().start(), | ||
291 | next.syntax().text_range().end(), | ||
292 | )) | ||
293 | } else { | 287 | } else { |
294 | None | 288 | None |
295 | } | 289 | } |
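extend_selection.rs exercises the renamed set operations: `a.is_subrange(&b)` becomes `b.contains_range(a)` (note the flipped receiver), and `a.extend_to(&b)` becomes `a.cover(b)`. A small sketch of the equivalences, assuming the `text_size` crate API:

    use text_size::TextRange;

    fn main() {
        let outer = TextRange::new(0.into(), 20.into());
        let inner = TextRange::new(5.into(), 9.into());

        // old: inner.is_subrange(&outer)
        assert!(outer.contains_range(inner));

        // old: inner.extend_to(&TextRange::new(15.into(), 18.into()))
        let covered = inner.cover(TextRange::new(15.into(), 18.into()));
        assert_eq!(covered, TextRange::new(5.into(), 18.into()));
    }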
diff --git a/crates/ra_ide/src/folding_ranges.rs b/crates/ra_ide/src/folding_ranges.rs index 4eeb76d14..034c4c7d4 100644 --- a/crates/ra_ide/src/folding_ranges.rs +++ b/crates/ra_ide/src/folding_ranges.rs | |||
@@ -141,7 +141,7 @@ fn contiguous_range_for_group_unless( | |||
141 | } | 141 | } |
142 | 142 | ||
143 | if first != &last { | 143 | if first != &last { |
144 | Some(TextRange::from_to(first.text_range().start(), last.text_range().end())) | 144 | Some(TextRange::new(first.text_range().start(), last.text_range().end())) |
145 | } else { | 145 | } else { |
146 | // The group consists of only one element, therefore it cannot be folded | 146 | // The group consists of only one element, therefore it cannot be folded |
147 | None | 147 | None |
@@ -187,7 +187,7 @@ fn contiguous_range_for_comment( | |||
187 | } | 187 | } |
188 | 188 | ||
189 | if first != last { | 189 | if first != last { |
190 | Some(TextRange::from_to( | 190 | Some(TextRange::new( |
191 | first.syntax().text_range().start(), | 191 | first.syntax().text_range().start(), |
192 | last.syntax().text_range().end(), | 192 | last.syntax().text_range().end(), |
193 | )) | 193 | )) |
diff --git a/crates/ra_ide/src/hover.rs b/crates/ra_ide/src/hover.rs index a31187994..fcc2ab7fb 100644 --- a/crates/ra_ide/src/hover.rs +++ b/crates/ra_ide/src/hover.rs | |||
@@ -275,7 +275,7 @@ mod tests { | |||
275 | ", | 275 | ", |
276 | ); | 276 | ); |
277 | let hover = analysis.hover(position).unwrap().unwrap(); | 277 | let hover = analysis.hover(position).unwrap().unwrap(); |
278 | assert_eq!(hover.range, TextRange::from_to(95.into(), 100.into())); | 278 | assert_eq!(hover.range, TextRange::new(95.into(), 100.into())); |
279 | assert_eq!(trim_markup_opt(hover.info.first()), Some("u32")); | 279 | assert_eq!(trim_markup_opt(hover.info.first()), Some("u32")); |
280 | } | 280 | } |
281 | 281 | ||
diff --git a/crates/ra_ide/src/join_lines.rs b/crates/ra_ide/src/join_lines.rs index 7d70dab9c..040846ec3 100644 --- a/crates/ra_ide/src/join_lines.rs +++ b/crates/ra_ide/src/join_lines.rs | |||
@@ -7,7 +7,7 @@ use ra_syntax::{ | |||
7 | ast::{self, AstNode, AstToken}, | 7 | ast::{self, AstNode, AstToken}, |
8 | Direction, NodeOrToken, SourceFile, | 8 | Direction, NodeOrToken, SourceFile, |
9 | SyntaxKind::{self, WHITESPACE}, | 9 | SyntaxKind::{self, WHITESPACE}, |
10 | SyntaxNode, SyntaxToken, TextRange, TextUnit, T, | 10 | SyntaxNode, SyntaxToken, TextRange, TextSize, T, |
11 | }; | 11 | }; |
12 | use ra_text_edit::{TextEdit, TextEditBuilder}; | 12 | use ra_text_edit::{TextEdit, TextEditBuilder}; |
13 | 13 | ||
@@ -19,7 +19,7 @@ pub fn join_lines(file: &SourceFile, range: TextRange) -> TextEdit { | |||
19 | None => return TextEditBuilder::default().finish(), | 19 | None => return TextEditBuilder::default().finish(), |
20 | Some(pos) => pos, | 20 | Some(pos) => pos, |
21 | }; | 21 | }; |
22 | TextRange::offset_len(range.start() + pos, TextUnit::of_char('\n')) | 22 | TextRange::at(range.start() + pos, TextSize::of('\n')) |
23 | } else { | 23 | } else { |
24 | range | 24 | range |
25 | }; | 25 | }; |
@@ -30,13 +30,13 @@ pub fn join_lines(file: &SourceFile, range: TextRange) -> TextEdit { | |||
30 | }; | 30 | }; |
31 | let mut edit = TextEditBuilder::default(); | 31 | let mut edit = TextEditBuilder::default(); |
32 | for token in node.descendants_with_tokens().filter_map(|it| it.into_token()) { | 32 | for token in node.descendants_with_tokens().filter_map(|it| it.into_token()) { |
33 | let range = match range.intersection(&token.text_range()) { | 33 | let range = match range.intersect(token.text_range()) { |
34 | Some(range) => range, | 34 | Some(range) => range, |
35 | None => continue, | 35 | None => continue, |
36 | } - token.text_range().start(); | 36 | } - token.text_range().start(); |
37 | let text = token.text(); | 37 | let text = token.text(); |
38 | for (pos, _) in text[range].bytes().enumerate().filter(|&(_, b)| b == b'\n') { | 38 | for (pos, _) in text[range].bytes().enumerate().filter(|&(_, b)| b == b'\n') { |
39 | let pos: TextUnit = (pos as u32).into(); | 39 | let pos: TextSize = (pos as u32).into(); |
40 | let off = token.text_range().start() + range.start() + pos; | 40 | let off = token.text_range().start() + range.start() + pos; |
41 | if !edit.invalidates_offset(off) { | 41 | if !edit.invalidates_offset(off) { |
42 | remove_newline(&mut edit, &token, off); | 42 | remove_newline(&mut edit, &token, off); |
@@ -47,16 +47,16 @@ pub fn join_lines(file: &SourceFile, range: TextRange) -> TextEdit { | |||
47 | edit.finish() | 47 | edit.finish() |
48 | } | 48 | } |
49 | 49 | ||
50 | fn remove_newline(edit: &mut TextEditBuilder, token: &SyntaxToken, offset: TextUnit) { | 50 | fn remove_newline(edit: &mut TextEditBuilder, token: &SyntaxToken, offset: TextSize) { |
51 | if token.kind() != WHITESPACE || token.text().bytes().filter(|&b| b == b'\n').count() != 1 { | 51 | if token.kind() != WHITESPACE || token.text().bytes().filter(|&b| b == b'\n').count() != 1 { |
52 | // The node is either the first or the last in the file | 52 | // The node is either the first or the last in the file |
53 | let suff = &token.text()[TextRange::from_to( | 53 | let suff = &token.text()[TextRange::new( |
54 | offset - token.text_range().start() + TextUnit::of_char('\n'), | 54 | offset - token.text_range().start() + TextSize::of('\n'), |
55 | TextUnit::of_str(token.text()), | 55 | TextSize::of(token.text().as_str()), |
56 | )]; | 56 | )]; |
57 | let spaces = suff.bytes().take_while(|&b| b == b' ').count(); | 57 | let spaces = suff.bytes().take_while(|&b| b == b' ').count(); |
58 | 58 | ||
59 | edit.replace(TextRange::offset_len(offset, ((spaces + 1) as u32).into()), " ".to_string()); | 59 | edit.replace(TextRange::at(offset, ((spaces + 1) as u32).into()), " ".to_string()); |
60 | return; | 60 | return; |
61 | } | 61 | } |
62 | 62 | ||
@@ -65,7 +65,7 @@ fn remove_newline(edit: &mut TextEditBuilder, token: &SyntaxToken, offset: TextU | |||
65 | let next = token.next_sibling_or_token().unwrap(); | 65 | let next = token.next_sibling_or_token().unwrap(); |
66 | if is_trailing_comma(prev.kind(), next.kind()) { | 66 | if is_trailing_comma(prev.kind(), next.kind()) { |
67 | // Removes: trailing comma, newline (incl. surrounding whitespace) | 67 | // Removes: trailing comma, newline (incl. surrounding whitespace) |
68 | edit.delete(TextRange::from_to(prev.text_range().start(), token.text_range().end())); | 68 | edit.delete(TextRange::new(prev.text_range().start(), token.text_range().end())); |
69 | return; | 69 | return; |
70 | } | 70 | } |
71 | if prev.kind() == T![,] && next.kind() == T!['}'] { | 71 | if prev.kind() == T![,] && next.kind() == T!['}'] { |
@@ -76,7 +76,7 @@ fn remove_newline(edit: &mut TextEditBuilder, token: &SyntaxToken, offset: TextU | |||
76 | " " | 76 | " " |
77 | }; | 77 | }; |
78 | edit.replace( | 78 | edit.replace( |
79 | TextRange::from_to(prev.text_range().start(), token.text_range().end()), | 79 | TextRange::new(prev.text_range().start(), token.text_range().end()), |
80 | space.to_string(), | 80 | space.to_string(), |
81 | ); | 81 | ); |
82 | return; | 82 | return; |
@@ -87,9 +87,9 @@ fn remove_newline(edit: &mut TextEditBuilder, token: &SyntaxToken, offset: TextU | |||
87 | next.as_token().cloned().and_then(ast::Comment::cast), | 87 | next.as_token().cloned().and_then(ast::Comment::cast), |
88 | ) { | 88 | ) { |
89 | // Removes: newline (incl. surrounding whitespace), start of the next comment | 89 | // Removes: newline (incl. surrounding whitespace), start of the next comment |
90 | edit.delete(TextRange::from_to( | 90 | edit.delete(TextRange::new( |
91 | token.text_range().start(), | 91 | token.text_range().start(), |
92 | next.syntax().text_range().start() + TextUnit::of_str(next.prefix()), | 92 | next.syntax().text_range().start() + TextSize::of(next.prefix()), |
93 | )); | 93 | )); |
94 | return; | 94 | return; |
95 | } | 95 | } |
@@ -420,10 +420,10 @@ fn foo() { | |||
420 | check_join_lines( | 420 | check_join_lines( |
421 | r" | 421 | r" |
422 | <|>use ra_syntax::{ | 422 | <|>use ra_syntax::{ |
423 | TextUnit, TextRange, | 423 | TextSize, TextRange, |
424 | };", | 424 | };", |
425 | r" | 425 | r" |
426 | <|>use ra_syntax::{TextUnit, TextRange, | 426 | <|>use ra_syntax::{TextSize, TextRange, |
427 | };", | 427 | };", |
428 | ); | 428 | ); |
429 | } | 429 | } |
@@ -434,11 +434,11 @@ fn foo() { | |||
434 | check_join_lines( | 434 | check_join_lines( |
435 | r" | 435 | r" |
436 | use ra_syntax::{ | 436 | use ra_syntax::{ |
437 | <|> TextUnit, TextRange | 437 | <|> TextSize, TextRange |
438 | };", | 438 | };", |
439 | r" | 439 | r" |
440 | use ra_syntax::{ | 440 | use ra_syntax::{ |
441 | <|> TextUnit, TextRange};", | 441 | <|> TextSize, TextRange};", |
442 | ); | 442 | ); |
443 | } | 443 | } |
444 | 444 | ||
@@ -448,11 +448,11 @@ use ra_syntax::{ | |||
448 | check_join_lines( | 448 | check_join_lines( |
449 | r" | 449 | r" |
450 | use ra_syntax::{ | 450 | use ra_syntax::{ |
451 | <|> TextUnit, TextRange, | 451 | <|> TextSize, TextRange, |
452 | };", | 452 | };", |
453 | r" | 453 | r" |
454 | use ra_syntax::{ | 454 | use ra_syntax::{ |
455 | <|> TextUnit, TextRange};", | 455 | <|> TextSize, TextRange};", |
456 | ); | 456 | ); |
457 | } | 457 | } |
458 | 458 | ||
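join_lines.rs shows two more renames: `TextRange::offset_len(offset, len)` is now `TextRange::at(offset, len)`, and `range.intersection(&other)` is now `range.intersect(other)`, taking its argument by value. A sketch, assuming the `text_size` crate:

    use text_size::{TextRange, TextSize};

    fn main() {
        // old: TextRange::offset_len(range.start() + pos, TextUnit::of_char('\n'))
        let newline = TextRange::at(TextSize::from(12), TextSize::of('\n'));
        assert_eq!(newline, TextRange::new(12.into(), 13.into()));

        // old: range.intersection(&token_range)
        let token_range = TextRange::new(10.into(), 30.into());
        let clipped = newline.intersect(token_range);
        assert_eq!(clipped, Some(newline));
    }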
diff --git a/crates/ra_ide/src/lib.rs b/crates/ra_ide/src/lib.rs index f692fbaa2..09f602fe1 100644 --- a/crates/ra_ide/src/lib.rs +++ b/crates/ra_ide/src/lib.rs | |||
@@ -60,7 +60,7 @@ use ra_ide_db::{ | |||
60 | symbol_index::{self, FileSymbol}, | 60 | symbol_index::{self, FileSymbol}, |
61 | LineIndexDatabase, | 61 | LineIndexDatabase, |
62 | }; | 62 | }; |
63 | use ra_syntax::{SourceFile, TextRange, TextUnit}; | 63 | use ra_syntax::{SourceFile, TextRange, TextSize}; |
64 | 64 | ||
65 | use crate::display::ToNav; | 65 | use crate::display::ToNav; |
66 | 66 | ||
@@ -265,7 +265,7 @@ impl Analysis { | |||
265 | 265 | ||
266 | /// Returns position of the matching brace (all types of braces are | 266 | /// Returns position of the matching brace (all types of braces are |
267 | /// supported). | 267 | /// supported). |
268 | pub fn matching_brace(&self, position: FilePosition) -> Cancelable<Option<TextUnit>> { | 268 | pub fn matching_brace(&self, position: FilePosition) -> Cancelable<Option<TextSize>> { |
269 | self.with_db(|db| { | 269 | self.with_db(|db| { |
270 | let parse = db.parse(position.file_id); | 270 | let parse = db.parse(position.file_id); |
271 | let file = parse.tree(); | 271 | let file = parse.tree(); |
diff --git a/crates/ra_ide/src/matching_brace.rs b/crates/ra_ide/src/matching_brace.rs index d1204fac0..b85348706 100644 --- a/crates/ra_ide/src/matching_brace.rs +++ b/crates/ra_ide/src/matching_brace.rs | |||
@@ -1,8 +1,8 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! FIXME: write short doc here |
2 | 2 | ||
3 | use ra_syntax::{ast::AstNode, SourceFile, SyntaxKind, TextUnit, T}; | 3 | use ra_syntax::{ast::AstNode, SourceFile, SyntaxKind, TextSize, T}; |
4 | 4 | ||
5 | pub fn matching_brace(file: &SourceFile, offset: TextUnit) -> Option<TextUnit> { | 5 | pub fn matching_brace(file: &SourceFile, offset: TextSize) -> Option<TextSize> { |
6 | const BRACES: &[SyntaxKind] = | 6 | const BRACES: &[SyntaxKind] = |
7 | &[T!['{'], T!['}'], T!['['], T![']'], T!['('], T![')'], T![<], T![>]]; | 7 | &[T!['{'], T!['}'], T!['['], T![']'], T!['('], T![')'], T![<], T![>]]; |
8 | let (brace_node, brace_idx) = file | 8 | let (brace_node, brace_idx) = file |
diff --git a/crates/ra_ide/src/references/rename.rs b/crates/ra_ide/src/references/rename.rs index 9acc6158a..1c64b3eb9 100644 --- a/crates/ra_ide/src/references/rename.rs +++ b/crates/ra_ide/src/references/rename.rs | |||
@@ -54,7 +54,7 @@ fn source_edit_from_reference(reference: Reference, new_name: &str) -> SourceFil | |||
54 | ReferenceKind::StructFieldShorthandForField => { | 54 | ReferenceKind::StructFieldShorthandForField => { |
55 | replacement_text.push_str(new_name); | 55 | replacement_text.push_str(new_name); |
56 | replacement_text.push_str(": "); | 56 | replacement_text.push_str(": "); |
57 | TextRange::from_to( | 57 | TextRange::new( |
58 | reference.file_range.range.start(), | 58 | reference.file_range.range.start(), |
59 | reference.file_range.range.start(), | 59 | reference.file_range.range.start(), |
60 | ) | 60 | ) |
@@ -62,7 +62,7 @@ fn source_edit_from_reference(reference: Reference, new_name: &str) -> SourceFil | |||
62 | ReferenceKind::StructFieldShorthandForLocal => { | 62 | ReferenceKind::StructFieldShorthandForLocal => { |
63 | replacement_text.push_str(": "); | 63 | replacement_text.push_str(": "); |
64 | replacement_text.push_str(new_name); | 64 | replacement_text.push_str(new_name); |
65 | TextRange::from_to(reference.file_range.range.end(), reference.file_range.range.end()) | 65 | TextRange::new(reference.file_range.range.end(), reference.file_range.range.end()) |
66 | } | 66 | } |
67 | _ => { | 67 | _ => { |
68 | replacement_text.push_str(new_name); | 68 | replacement_text.push_str(new_name); |
diff --git a/crates/ra_ide/src/source_change.rs b/crates/ra_ide/src/source_change.rs index f5f7f8807..71b0e8f75 100644 --- a/crates/ra_ide/src/source_change.rs +++ b/crates/ra_ide/src/source_change.rs | |||
@@ -6,7 +6,7 @@ | |||
6 | use ra_db::RelativePathBuf; | 6 | use ra_db::RelativePathBuf; |
7 | use ra_text_edit::TextEdit; | 7 | use ra_text_edit::TextEdit; |
8 | 8 | ||
9 | use crate::{FileId, FilePosition, SourceRootId, TextUnit}; | 9 | use crate::{FileId, FilePosition, SourceRootId, TextSize}; |
10 | 10 | ||
11 | #[derive(Debug)] | 11 | #[derive(Debug)] |
12 | pub struct SourceChange { | 12 | pub struct SourceChange { |
@@ -104,7 +104,7 @@ pub enum FileSystemEdit { | |||
104 | pub(crate) struct SingleFileChange { | 104 | pub(crate) struct SingleFileChange { |
105 | pub label: String, | 105 | pub label: String, |
106 | pub edit: TextEdit, | 106 | pub edit: TextEdit, |
107 | pub cursor_position: Option<TextUnit>, | 107 | pub cursor_position: Option<TextSize>, |
108 | } | 108 | } |
109 | 109 | ||
110 | impl SingleFileChange { | 110 | impl SingleFileChange { |
diff --git a/crates/ra_ide/src/syntax_highlighting.rs b/crates/ra_ide/src/syntax_highlighting.rs index c0728bfb1..6f02614a6 100644 --- a/crates/ra_ide/src/syntax_highlighting.rs +++ b/crates/ra_ide/src/syntax_highlighting.rs | |||
@@ -61,16 +61,16 @@ impl HighlightedRangeStack { | |||
61 | let prev = self.stack.last_mut().unwrap(); | 61 | let prev = self.stack.last_mut().unwrap(); |
62 | let needs_flattening = !children.is_empty() | 62 | let needs_flattening = !children.is_empty() |
63 | && !prev.is_empty() | 63 | && !prev.is_empty() |
64 | && children.first().unwrap().range.is_subrange(&prev.last().unwrap().range); | 64 | && prev.last().unwrap().range.contains_range(children.first().unwrap().range); |
65 | if !needs_flattening { | 65 | if !needs_flattening { |
66 | prev.extend(children); | 66 | prev.extend(children); |
67 | } else { | 67 | } else { |
68 | let mut parent = prev.pop().unwrap(); | 68 | let mut parent = prev.pop().unwrap(); |
69 | for ele in children { | 69 | for ele in children { |
70 | assert!(ele.range.is_subrange(&parent.range)); | 70 | assert!(parent.range.contains_range(ele.range)); |
71 | let mut cloned = parent.clone(); | 71 | let mut cloned = parent.clone(); |
72 | parent.range = TextRange::from_to(parent.range.start(), ele.range.start()); | 72 | parent.range = TextRange::new(parent.range.start(), ele.range.start()); |
73 | cloned.range = TextRange::from_to(ele.range.end(), cloned.range.end()); | 73 | cloned.range = TextRange::new(ele.range.end(), cloned.range.end()); |
74 | if !parent.range.is_empty() { | 74 | if !parent.range.is_empty() { |
75 | prev.push(parent); | 75 | prev.push(parent); |
76 | } | 76 | } |
@@ -152,7 +152,7 @@ pub(crate) fn highlight( | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | // Element outside of the viewport, no need to highlight | 154 | // Element outside of the viewport, no need to highlight |
155 | if range_to_highlight.intersection(&event_range).is_none() { | 155 | if range_to_highlight.intersect(event_range).is_none() { |
156 | continue; | 156 | continue; |
157 | } | 157 | } |
158 | 158 | ||
@@ -309,7 +309,7 @@ fn macro_call_range(macro_call: &ast::MacroCall) -> Option<TextRange> { | |||
309 | } | 309 | } |
310 | } | 310 | } |
311 | 311 | ||
312 | Some(TextRange::from_to(range_start, range_end)) | 312 | Some(TextRange::new(range_start, range_end)) |
313 | } | 313 | } |
314 | 314 | ||
315 | fn highlight_element( | 315 | fn highlight_element( |
diff --git a/crates/ra_ide/src/syntax_highlighting/html.rs b/crates/ra_ide/src/syntax_highlighting/html.rs index 4496529a1..4f17d1040 100644 --- a/crates/ra_ide/src/syntax_highlighting/html.rs +++ b/crates/ra_ide/src/syntax_highlighting/html.rs | |||
@@ -1,7 +1,7 @@ | |||
1 | //! Renders a bit of code as HTML. | 1 | //! Renders a bit of code as HTML. |
2 | 2 | ||
3 | use ra_db::SourceDatabase; | 3 | use ra_db::SourceDatabase; |
4 | use ra_syntax::{AstNode, TextUnit}; | 4 | use ra_syntax::{AstNode, TextSize}; |
5 | 5 | ||
6 | use crate::{FileId, RootDatabase}; | 6 | use crate::{FileId, RootDatabase}; |
7 | 7 | ||
@@ -23,17 +23,18 @@ pub(crate) fn highlight_as_html(db: &RootDatabase, file_id: FileId, rainbow: boo | |||
23 | 23 | ||
24 | let ranges = highlight(db, file_id, None); | 24 | let ranges = highlight(db, file_id, None); |
25 | let text = parse.tree().syntax().to_string(); | 25 | let text = parse.tree().syntax().to_string(); |
26 | let mut prev_pos = TextUnit::from(0); | 26 | let mut prev_pos = TextSize::from(0); |
27 | let mut buf = String::new(); | 27 | let mut buf = String::new(); |
28 | buf.push_str(&STYLE); | 28 | buf.push_str(&STYLE); |
29 | buf.push_str("<pre><code>"); | 29 | buf.push_str("<pre><code>"); |
30 | // TODO: unusize | ||
30 | for range in &ranges { | 31 | for range in &ranges { |
31 | if range.range.start() > prev_pos { | 32 | if range.range.start() > prev_pos { |
32 | let curr = &text[prev_pos.to_usize()..range.range.start().to_usize()]; | 33 | let curr = &text[usize::from(prev_pos)..usize::from(range.range.start())]; |
33 | let text = html_escape(curr); | 34 | let text = html_escape(curr); |
34 | buf.push_str(&text); | 35 | buf.push_str(&text); |
35 | } | 36 | } |
36 | let curr = &text[range.range.start().to_usize()..range.range.end().to_usize()]; | 37 | let curr = &text[usize::from(range.range.start())..usize::from(range.range.end())]; |
37 | 38 | ||
38 | let class = range.highlight.to_string().replace('.', " "); | 39 | let class = range.highlight.to_string().replace('.', " "); |
39 | let color = match (rainbow, range.binding_hash) { | 40 | let color = match (rainbow, range.binding_hash) { |
@@ -47,7 +48,7 @@ pub(crate) fn highlight_as_html(db: &RootDatabase, file_id: FileId, rainbow: boo | |||
47 | prev_pos = range.range.end(); | 48 | prev_pos = range.range.end(); |
48 | } | 49 | } |
49 | // Add the remaining (non-highlighted) text | 50 | // Add the remaining (non-highlighted) text |
50 | let curr = &text[prev_pos.to_usize()..]; | 51 | let curr = &text[usize::from(prev_pos)..]; |
51 | let text = html_escape(curr); | 52 | let text = html_escape(curr); |
52 | buf.push_str(&text); | 53 | buf.push_str(&text); |
53 | buf.push_str("</code></pre>"); | 54 | buf.push_str("</code></pre>"); |
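The HTML renderer no longer calls `.to_usize()`; plain `usize::from` conversions are used to slice the source text. A sketch of that slicing pattern, assuming the `text_size` crate, which also implements `Index<TextRange>` for `str` and so can avoid the conversion entirely:

    use text_size::{TextRange, TextSize};

    fn main() {
        let text = "let x = 92;";
        let range = TextRange::at(TextSize::from(4), TextSize::of('x'));

        // Explicit conversion, as in highlight_as_html:
        let by_usize = &text[usize::from(range.start())..usize::from(range.end())];

        // Equivalent, using the Index impl:
        let by_range = &text[range];

        assert_eq!(by_usize, by_range);
        assert_eq!(by_range, "x");
    }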
diff --git a/crates/ra_ide/src/syntax_tree.rs b/crates/ra_ide/src/syntax_tree.rs index 5842ae2e8..a8a97a69f 100644 --- a/crates/ra_ide/src/syntax_tree.rs +++ b/crates/ra_ide/src/syntax_tree.rs | |||
@@ -5,7 +5,7 @@ use ra_ide_db::RootDatabase; | |||
5 | use ra_syntax::{ | 5 | use ra_syntax::{ |
6 | algo, AstNode, NodeOrToken, SourceFile, | 6 | algo, AstNode, NodeOrToken, SourceFile, |
7 | SyntaxKind::{RAW_STRING, STRING}, | 7 | SyntaxKind::{RAW_STRING, STRING}, |
8 | SyntaxToken, TextRange, TextUnit, | 8 | SyntaxToken, TextRange, TextSize, |
9 | }; | 9 | }; |
10 | 10 | ||
11 | pub use ra_db::FileId; | 11 | pub use ra_db::FileId; |
@@ -66,13 +66,13 @@ fn syntax_tree_for_token(node: &SyntaxToken, text_range: TextRange) -> Option<St | |||
66 | let len = len.min(node_len); | 66 | let len = len.min(node_len); |
67 | 67 | ||
68 | // Ensure our slice is inside the actual string | 68 | // Ensure our slice is inside the actual string |
69 | let end = if start + len < TextUnit::of_str(&text) { | 69 | let end = if start + len < TextSize::of(&text) { |
70 | start + len | 70 | start + len |
71 | } else { | 71 | } else { |
72 | TextUnit::of_str(&text) - start | 72 | TextSize::of(&text) - start |
73 | }; | 73 | }; |
74 | 74 | ||
75 | let text = &text[TextRange::from_to(start, end)]; | 75 | let text = &text[TextRange::new(start, end)]; |
76 | 76 | ||
77 | // Remove possible extra string quotes from the start | 77 | // Remove possible extra string quotes from the start |
78 | // and the end of the string | 78 | // and the end of the string |
diff --git a/crates/ra_ide/src/test_utils.rs b/crates/ra_ide/src/test_utils.rs index 8adb214d4..f14533e14 100644 --- a/crates/ra_ide/src/test_utils.rs +++ b/crates/ra_ide/src/test_utils.rs | |||
@@ -1,11 +1,11 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! FIXME: write short doc here |
2 | 2 | ||
3 | use ra_syntax::{SourceFile, TextUnit}; | 3 | use ra_syntax::{SourceFile, TextSize}; |
4 | use ra_text_edit::TextEdit; | 4 | use ra_text_edit::TextEdit; |
5 | 5 | ||
6 | pub use test_utils::*; | 6 | pub use test_utils::*; |
7 | 7 | ||
8 | pub fn check_action<F: Fn(&SourceFile, TextUnit) -> Option<TextEdit>>( | 8 | pub fn check_action<F: Fn(&SourceFile, TextSize) -> Option<TextEdit>>( |
9 | before: &str, | 9 | before: &str, |
10 | after: &str, | 10 | after: &str, |
11 | f: F, | 11 | f: F, |
diff --git a/crates/ra_ide/src/typing.rs b/crates/ra_ide/src/typing.rs index f55cd3bf5..98af79dff 100644 --- a/crates/ra_ide/src/typing.rs +++ b/crates/ra_ide/src/typing.rs | |||
@@ -21,7 +21,7 @@ use ra_ide_db::RootDatabase; | |||
21 | use ra_syntax::{ | 21 | use ra_syntax::{ |
22 | algo::find_node_at_offset, | 22 | algo::find_node_at_offset, |
23 | ast::{self, AstToken}, | 23 | ast::{self, AstToken}, |
24 | AstNode, SourceFile, TextRange, TextUnit, | 24 | AstNode, SourceFile, TextRange, TextSize, |
25 | }; | 25 | }; |
26 | use ra_text_edit::TextEdit; | 26 | use ra_text_edit::TextEdit; |
27 | 27 | ||
@@ -45,7 +45,7 @@ pub(crate) fn on_char_typed( | |||
45 | 45 | ||
46 | fn on_char_typed_inner( | 46 | fn on_char_typed_inner( |
47 | file: &SourceFile, | 47 | file: &SourceFile, |
48 | offset: TextUnit, | 48 | offset: TextSize, |
49 | char_typed: char, | 49 | char_typed: char, |
50 | ) -> Option<SingleFileChange> { | 50 | ) -> Option<SingleFileChange> { |
51 | assert!(TRIGGER_CHARS.contains(char_typed)); | 51 | assert!(TRIGGER_CHARS.contains(char_typed)); |
@@ -60,7 +60,7 @@ fn on_char_typed_inner( | |||
60 | /// Returns an edit which should be applied after `=` was typed. Primarily, | 60 | /// Returns an edit which should be applied after `=` was typed. Primarily, |
61 | /// this works when adding `let =`. | 61 | /// this works when adding `let =`. |
62 | // FIXME: use a snippet completion instead of this hack here. | 62 | // FIXME: use a snippet completion instead of this hack here. |
63 | fn on_eq_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> { | 63 | fn on_eq_typed(file: &SourceFile, offset: TextSize) -> Option<SingleFileChange> { |
64 | assert_eq!(file.syntax().text().char_at(offset), Some('=')); | 64 | assert_eq!(file.syntax().text().char_at(offset), Some('=')); |
65 | let let_stmt: ast::LetStmt = find_node_at_offset(file.syntax(), offset)?; | 65 | let let_stmt: ast::LetStmt = find_node_at_offset(file.syntax(), offset)?; |
66 | if let_stmt.semicolon_token().is_some() { | 66 | if let_stmt.semicolon_token().is_some() { |
@@ -86,7 +86,7 @@ fn on_eq_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> | |||
86 | } | 86 | } |
87 | 87 | ||
88 | /// Returns an edit which should be applied when a dot ('.') is typed on a blank line, indenting the line appropriately. | 88 | /// Returns an edit which should be applied when a dot ('.') is typed on a blank line, indenting the line appropriately. |
89 | fn on_dot_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> { | 89 | fn on_dot_typed(file: &SourceFile, offset: TextSize) -> Option<SingleFileChange> { |
90 | assert_eq!(file.syntax().text().char_at(offset), Some('.')); | 90 | assert_eq!(file.syntax().text().char_at(offset), Some('.')); |
91 | let whitespace = | 91 | let whitespace = |
92 | file.syntax().token_at_offset(offset).left_biased().and_then(ast::Whitespace::cast)?; | 92 | file.syntax().token_at_offset(offset).left_biased().and_then(ast::Whitespace::cast)?; |
@@ -96,13 +96,13 @@ fn on_dot_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> | |||
96 | let newline = text.rfind('\n')?; | 96 | let newline = text.rfind('\n')?; |
97 | &text[newline + 1..] | 97 | &text[newline + 1..] |
98 | }; | 98 | }; |
99 | let current_indent_len = TextUnit::of_str(current_indent); | 99 | let current_indent_len = TextSize::of(current_indent); |
100 | 100 | ||
101 | // Make sure dot is a part of call chain | 101 | // Make sure dot is a part of call chain |
102 | let field_expr = ast::FieldExpr::cast(whitespace.syntax().parent())?; | 102 | let field_expr = ast::FieldExpr::cast(whitespace.syntax().parent())?; |
103 | let prev_indent = leading_indent(field_expr.syntax())?; | 103 | let prev_indent = leading_indent(field_expr.syntax())?; |
104 | let target_indent = format!(" {}", prev_indent); | 104 | let target_indent = format!(" {}", prev_indent); |
105 | let target_indent_len = TextUnit::of_str(&target_indent); | 105 | let target_indent_len = TextSize::of(&target_indent); |
106 | if current_indent_len == target_indent_len { | 106 | if current_indent_len == target_indent_len { |
107 | return None; | 107 | return None; |
108 | } | 108 | } |
@@ -110,20 +110,20 @@ fn on_dot_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> | |||
110 | Some(SingleFileChange { | 110 | Some(SingleFileChange { |
111 | label: "reindent dot".to_string(), | 111 | label: "reindent dot".to_string(), |
112 | edit: TextEdit::replace( | 112 | edit: TextEdit::replace( |
113 | TextRange::from_to(offset - current_indent_len, offset), | 113 | TextRange::new(offset - current_indent_len, offset), |
114 | target_indent, | 114 | target_indent, |
115 | ), | 115 | ), |
116 | cursor_position: Some( | 116 | cursor_position: Some( |
117 | offset + target_indent_len - current_indent_len + TextUnit::of_char('.'), | 117 | offset + target_indent_len - current_indent_len + TextSize::of('.'), |
118 | ), | 118 | ), |
119 | }) | 119 | }) |
120 | } | 120 | } |
121 | 121 | ||
122 | /// Adds a space after an arrow when `fn foo() { ... }` is turned into `fn foo() -> { ... }` | 122 | /// Adds a space after an arrow when `fn foo() { ... }` is turned into `fn foo() -> { ... }` |
123 | fn on_arrow_typed(file: &SourceFile, offset: TextUnit) -> Option<SingleFileChange> { | 123 | fn on_arrow_typed(file: &SourceFile, offset: TextSize) -> Option<SingleFileChange> { |
124 | let file_text = file.syntax().text(); | 124 | let file_text = file.syntax().text(); |
125 | assert_eq!(file_text.char_at(offset), Some('>')); | 125 | assert_eq!(file_text.char_at(offset), Some('>')); |
126 | let after_arrow = offset + TextUnit::of_char('>'); | 126 | let after_arrow = offset + TextSize::of('>'); |
127 | if file_text.char_at(after_arrow) != Some('{') { | 127 | if file_text.char_at(after_arrow) != Some('{') { |
128 | return None; | 128 | return None; |
129 | } | 129 | } |
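typing.rs highlights that the old `TextUnit::of_char` / `TextUnit::of_str` pair collapses into a single generic `TextSize::of`, which measures anything with a UTF-8 length. A sketch, assuming the `text_size` crate:

    use text_size::TextSize;

    fn main() {
        // old: TextUnit::of_char('.')
        assert_eq!(u32::from(TextSize::of('.')), 1);

        // old: TextUnit::of_str(&target_indent)
        let target_indent = "    foo";
        assert_eq!(u32::from(TextSize::of(target_indent)), 7);

        // Multi-byte characters are measured in UTF-8 bytes, not chars.
        assert_eq!(u32::from(TextSize::of('メ')), 3);
    }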
diff --git a/crates/ra_ide/src/typing/on_enter.rs b/crates/ra_ide/src/typing/on_enter.rs index 6bcf2d72b..30c8c5572 100644 --- a/crates/ra_ide/src/typing/on_enter.rs +++ b/crates/ra_ide/src/typing/on_enter.rs | |||
@@ -7,7 +7,7 @@ use ra_syntax::{ | |||
7 | ast::{self, AstToken}, | 7 | ast::{self, AstToken}, |
8 | AstNode, SmolStr, SourceFile, | 8 | AstNode, SmolStr, SourceFile, |
9 | SyntaxKind::*, | 9 | SyntaxKind::*, |
10 | SyntaxToken, TextUnit, TokenAtOffset, | 10 | SyntaxToken, TextSize, TokenAtOffset, |
11 | }; | 11 | }; |
12 | use ra_text_edit::TextEdit; | 12 | use ra_text_edit::TextEdit; |
13 | 13 | ||
@@ -28,7 +28,7 @@ pub(crate) fn on_enter(db: &RootDatabase, position: FilePosition) -> Option<Sour | |||
28 | 28 | ||
29 | let prefix = comment.prefix(); | 29 | let prefix = comment.prefix(); |
30 | let comment_range = comment.syntax().text_range(); | 30 | let comment_range = comment.syntax().text_range(); |
31 | if position.offset < comment_range.start() + TextUnit::of_str(prefix) { | 31 | if position.offset < comment_range.start() + TextSize::of(prefix) { |
32 | return None; | 32 | return None; |
33 | } | 33 | } |
34 | 34 | ||
@@ -39,7 +39,7 @@ pub(crate) fn on_enter(db: &RootDatabase, position: FilePosition) -> Option<Sour | |||
39 | 39 | ||
40 | let indent = node_indent(&file, comment.syntax())?; | 40 | let indent = node_indent(&file, comment.syntax())?; |
41 | let inserted = format!("\n{}{} ", indent, prefix); | 41 | let inserted = format!("\n{}{} ", indent, prefix); |
42 | let cursor_position = position.offset + TextUnit::of_str(&inserted); | 42 | let cursor_position = position.offset + TextSize::of(&inserted); |
43 | let edit = TextEdit::insert(position.offset, inserted); | 43 | let edit = TextEdit::insert(position.offset, inserted); |
44 | 44 | ||
45 | Some( | 45 | Some( |
diff --git a/crates/ra_ide_db/src/line_index.rs b/crates/ra_ide_db/src/line_index.rs index 8ae745ff2..7794dc9fd 100644 --- a/crates/ra_ide_db/src/line_index.rs +++ b/crates/ra_ide_db/src/line_index.rs | |||
@@ -1,14 +1,14 @@ | |||
1 | //! `LineIndex` maps flat `TextUnit` offsets into `(Line, Column)` | 1 | //! `LineIndex` maps flat `TextSize` offsets into `(Line, Column)` |
2 | //! representation. | 2 | //! representation. |
3 | use std::iter; | 3 | use std::iter; |
4 | 4 | // TODO: un TextSize | |
5 | use ra_syntax::{TextRange, TextUnit}; | 5 | use ra_syntax::{TextRange, TextSize}; |
6 | use rustc_hash::FxHashMap; | 6 | use rustc_hash::FxHashMap; |
7 | use superslice::Ext; | 7 | use superslice::Ext; |
8 | 8 | ||
9 | #[derive(Clone, Debug, PartialEq, Eq)] | 9 | #[derive(Clone, Debug, PartialEq, Eq)] |
10 | pub struct LineIndex { | 10 | pub struct LineIndex { |
11 | pub(crate) newlines: Vec<TextUnit>, | 11 | pub(crate) newlines: Vec<TextSize>, |
12 | pub(crate) utf16_lines: FxHashMap<u32, Vec<Utf16Char>>, | 12 | pub(crate) utf16_lines: FxHashMap<u32, Vec<Utf16Char>>, |
13 | } | 13 | } |
14 | 14 | ||
@@ -22,12 +22,12 @@ pub struct LineCol { | |||
22 | 22 | ||
23 | #[derive(Clone, Debug, Hash, PartialEq, Eq)] | 23 | #[derive(Clone, Debug, Hash, PartialEq, Eq)] |
24 | pub(crate) struct Utf16Char { | 24 | pub(crate) struct Utf16Char { |
25 | pub(crate) start: TextUnit, | 25 | pub(crate) start: TextSize, |
26 | pub(crate) end: TextUnit, | 26 | pub(crate) end: TextSize, |
27 | } | 27 | } |
28 | 28 | ||
29 | impl Utf16Char { | 29 | impl Utf16Char { |
30 | fn len(&self) -> TextUnit { | 30 | fn len(&self) -> TextSize { |
31 | self.end - self.start | 31 | self.end - self.start |
32 | } | 32 | } |
33 | } | 33 | } |
@@ -42,7 +42,7 @@ impl LineIndex { | |||
42 | let mut curr_col = 0.into(); | 42 | let mut curr_col = 0.into(); |
43 | let mut line = 0; | 43 | let mut line = 0; |
44 | for c in text.chars() { | 44 | for c in text.chars() { |
45 | curr_row += TextUnit::of_char(c); | 45 | curr_row += TextSize::of(c); |
46 | if c == '\n' { | 46 | if c == '\n' { |
47 | newlines.push(curr_row); | 47 | newlines.push(curr_row); |
48 | 48 | ||
@@ -58,8 +58,8 @@ impl LineIndex { | |||
58 | continue; | 58 | continue; |
59 | } | 59 | } |
60 | 60 | ||
61 | let char_len = TextUnit::of_char(c); | 61 | let char_len = TextSize::of(c); |
62 | if char_len > TextUnit::from_usize(1) { | 62 | if char_len > TextSize::from_usize(1) { |
63 | utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len }); | 63 | utf16_chars.push(Utf16Char { start: curr_col, end: curr_col + char_len }); |
64 | } | 64 | } |
65 | 65 | ||
@@ -74,7 +74,7 @@ impl LineIndex { | |||
74 | LineIndex { newlines, utf16_lines } | 74 | LineIndex { newlines, utf16_lines } |
75 | } | 75 | } |
76 | 76 | ||
77 | pub fn line_col(&self, offset: TextUnit) -> LineCol { | 77 | pub fn line_col(&self, offset: TextSize) -> LineCol { |
78 | let line = self.newlines.upper_bound(&offset) - 1; | 78 | let line = self.newlines.upper_bound(&offset) - 1; |
79 | let line_start_offset = self.newlines[line]; | 79 | let line_start_offset = self.newlines[line]; |
80 | let col = offset - line_start_offset; | 80 | let col = offset - line_start_offset; |
@@ -82,7 +82,7 @@ impl LineIndex { | |||
82 | LineCol { line: line as u32, col_utf16: self.utf8_to_utf16_col(line as u32, col) as u32 } | 82 | LineCol { line: line as u32, col_utf16: self.utf8_to_utf16_col(line as u32, col) as u32 } |
83 | } | 83 | } |
84 | 84 | ||
85 | pub fn offset(&self, line_col: LineCol) -> TextUnit { | 85 | pub fn offset(&self, line_col: LineCol) -> TextSize { |
86 | //FIXME: return Result | 86 | //FIXME: return Result |
87 | let col = self.utf16_to_utf8_col(line_col.line, line_col.col_utf16); | 87 | let col = self.utf16_to_utf8_col(line_col.line, line_col.col_utf16); |
88 | self.newlines[line_col.line as usize] + col | 88 | self.newlines[line_col.line as usize] + col |
@@ -97,16 +97,16 @@ impl LineIndex { | |||
97 | 97 | ||
98 | all.clone() | 98 | all.clone() |
99 | .zip(all.skip(1)) | 99 | .zip(all.skip(1)) |
100 | .map(|(lo, hi)| TextRange::from_to(lo, hi)) | 100 | .map(|(lo, hi)| TextRange::new(lo, hi)) |
101 | .filter(|it| !it.is_empty()) | 101 | .filter(|it| !it.is_empty()) |
102 | } | 102 | } |
103 | 103 | ||
104 | fn utf8_to_utf16_col(&self, line: u32, col: TextUnit) -> usize { | 104 | fn utf8_to_utf16_col(&self, line: u32, col: TextSize) -> usize { |
105 | if let Some(utf16_chars) = self.utf16_lines.get(&line) { | 105 | if let Some(utf16_chars) = self.utf16_lines.get(&line) { |
106 | let mut correction = 0; | 106 | let mut correction = 0; |
107 | for c in utf16_chars { | 107 | for c in utf16_chars { |
108 | if col >= c.end { | 108 | if col >= c.end { |
109 | correction += c.len().to_usize() - 1; | 109 | correction += usize::from(c.len()) - 1; |
110 | } else { | 110 | } else { |
111 | // From here on, all utf16 characters come *after* the character we are mapping, | 111 | // From here on, all utf16 characters come *after* the character we are mapping, |
112 | // so we don't need to take them into account | 112 | // so we don't need to take them into account |
@@ -114,18 +114,18 @@ impl LineIndex { | |||
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | col.to_usize() - correction | 117 | usize::from(col) - correction |
118 | } else { | 118 | } else { |
119 | col.to_usize() | 119 | usize::from(col) |
120 | } | 120 | } |
121 | } | 121 | } |
122 | 122 | ||
123 | fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextUnit { | 123 | fn utf16_to_utf8_col(&self, line: u32, col: u32) -> TextSize { |
124 | let mut col: TextUnit = col.into(); | 124 | let mut col: TextSize = col.into(); |
125 | if let Some(utf16_chars) = self.utf16_lines.get(&line) { | 125 | if let Some(utf16_chars) = self.utf16_lines.get(&line) { |
126 | for c in utf16_chars { | 126 | for c in utf16_chars { |
127 | if col >= c.start { | 127 | if col >= c.start { |
128 | col += c.len() - TextUnit::from_usize(1); | 128 | col += c.len() - TextSize::from_usize(1); |
129 | } else { | 129 | } else { |
130 | // From here on, all utf16 characters come *after* the character we are mapping, | 130 | // From here on, all utf16 characters come *after* the character we are mapping, |
131 | // so we don't need to take them into account | 131 | // so we don't need to take them into account |
@@ -200,10 +200,10 @@ const C: char = 'メ'; | |||
200 | assert_eq!(col_index.utf8_to_utf16_col(1, 22.into()), 20); | 200 | assert_eq!(col_index.utf8_to_utf16_col(1, 22.into()), 20); |
201 | 201 | ||
202 | // UTF-16 to UTF-8, no changes | 202 | // UTF-16 to UTF-8, no changes |
203 | assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextUnit::from(15)); | 203 | assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from(15)); |
204 | 204 | ||
205 | // UTF-16 to UTF-8 | 205 | // UTF-16 to UTF-8 |
206 | assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextUnit::from(21)); | 206 | assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from(21)); |
207 | } | 207 | } |
208 | 208 | ||
209 | #[test] | 209 | #[test] |
@@ -228,18 +228,18 @@ const C: char = \"メ メ\"; | |||
228 | assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15); | 228 | assert!(col_index.utf8_to_utf16_col(2, 15.into()) == 15); |
229 | 229 | ||
230 | // UTF-16 to UTF-8 | 230 | // UTF-16 to UTF-8 |
231 | assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextUnit::from_usize(15)); | 231 | assert_eq!(col_index.utf16_to_utf8_col(1, 15), TextSize::from_usize(15)); |
232 | 232 | ||
233 | assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextUnit::from_usize(20)); | 233 | assert_eq!(col_index.utf16_to_utf8_col(1, 18), TextSize::from_usize(20)); |
234 | assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextUnit::from_usize(23)); | 234 | assert_eq!(col_index.utf16_to_utf8_col(1, 19), TextSize::from_usize(23)); |
235 | 235 | ||
236 | assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextUnit::from_usize(15)); | 236 | assert_eq!(col_index.utf16_to_utf8_col(2, 15), TextSize::from_usize(15)); |
237 | } | 237 | } |
238 | 238 | ||
239 | #[test] | 239 | #[test] |
240 | fn test_splitlines() { | 240 | fn test_splitlines() { |
241 | fn r(lo: u32, hi: u32) -> TextRange { | 241 | fn r(lo: u32, hi: u32) -> TextRange { |
242 | TextRange::from_to(lo.into(), hi.into()) | 242 | TextRange::new(lo.into(), hi.into()) |
243 | } | 243 | } |
244 | 244 | ||
245 | let text = "a\nbb\nccc\n"; | 245 | let text = "a\nbb\nccc\n"; |
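LineIndex now stores its newline offsets as `Vec<TextSize>`. A minimal sketch of the same offset-to-(line, col) lookup, using `partition_point` in place of the `superslice` upper bound and ignoring the UTF-16 correction for brevity; the function names here are illustrative, not the crate's:

    use text_size::TextSize;

    fn line_col(newlines: &[TextSize], offset: TextSize) -> (u32, u32) {
        // `newlines` holds the offset just *after* each '\n', as built below.
        let line = newlines.partition_point(|&it| it <= offset);
        let line_start = if line == 0 { TextSize::from(0) } else { newlines[line - 1] };
        (line as u32, u32::from(offset - line_start))
    }

    fn main() {
        let text = "a\nbb\nccc\n";
        let mut newlines = Vec::new();
        let mut pos = TextSize::from(0);
        for c in text.chars() {
            pos += TextSize::of(c);
            if c == '\n' {
                newlines.push(pos);
            }
        }
        // Offset 3 is the second 'b': line 1, column 1.
        assert_eq!(line_col(&newlines, TextSize::from(3)), (1, 1));
    }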
diff --git a/crates/ra_ide_db/src/line_index_utils.rs b/crates/ra_ide_db/src/line_index_utils.rs index 2ebbabdc6..f050fe77f 100644 --- a/crates/ra_ide_db/src/line_index_utils.rs +++ b/crates/ra_ide_db/src/line_index_utils.rs | |||
@@ -1,20 +1,20 @@ | |||
1 | //! Code actions can specify desirable final position of the cursor. | 1 | //! Code actions can specify desirable final position of the cursor. |
2 | //! | 2 | //! |
3 | //! The position is specified as a `TextUnit` in the final file. We need to send | 3 | //! The position is specified as a `TextSize` in the final file. We need to send |
4 | //! it in `(Line, Column)` coordinate though. However, we only have a LineIndex | 4 | //! it in `(Line, Column)` coordinate though. However, we only have a LineIndex |
5 | //! for a file pre-edit! | 5 | //! for a file pre-edit! |
6 | //! | 6 | //! |
7 | //! Code in this module applies this "to (Line, Column) after edit" | 7 | //! Code in this module applies this "to (Line, Column) after edit" |
8 | //! transformation. | 8 | //! transformation. |
9 | 9 | ||
10 | use ra_syntax::{TextRange, TextUnit}; | 10 | use ra_syntax::{TextRange, TextSize}; |
11 | use ra_text_edit::{AtomTextEdit, TextEdit}; | 11 | use ra_text_edit::{AtomTextEdit, TextEdit}; |
12 | 12 | ||
13 | use crate::line_index::{LineCol, LineIndex, Utf16Char}; | 13 | use crate::line_index::{LineCol, LineIndex, Utf16Char}; |
14 | 14 | ||
15 | pub fn translate_offset_with_edit( | 15 | pub fn translate_offset_with_edit( |
16 | line_index: &LineIndex, | 16 | line_index: &LineIndex, |
17 | offset: TextUnit, | 17 | offset: TextSize, |
18 | text_edit: &TextEdit, | 18 | text_edit: &TextEdit, |
19 | ) -> LineCol { | 19 | ) -> LineCol { |
20 | let mut state = Edits::from_text_edit(&text_edit); | 20 | let mut state = Edits::from_text_edit(&text_edit); |
@@ -84,7 +84,7 @@ pub fn translate_offset_with_edit( | |||
84 | 84 | ||
85 | #[derive(Debug, Clone)] | 85 | #[derive(Debug, Clone)] |
86 | enum Step { | 86 | enum Step { |
87 | Newline(TextUnit), | 87 | Newline(TextSize), |
88 | Utf16Char(TextRange), | 88 | Utf16Char(TextRange), |
89 | } | 89 | } |
90 | 90 | ||
@@ -92,7 +92,7 @@ enum Step { | |||
92 | struct LineIndexStepIter<'a> { | 92 | struct LineIndexStepIter<'a> { |
93 | line_index: &'a LineIndex, | 93 | line_index: &'a LineIndex, |
94 | next_newline_idx: usize, | 94 | next_newline_idx: usize, |
95 | utf16_chars: Option<(TextUnit, std::slice::Iter<'a, Utf16Char>)>, | 95 | utf16_chars: Option<(TextSize, std::slice::Iter<'a, Utf16Char>)>, |
96 | } | 96 | } |
97 | 97 | ||
98 | impl LineIndexStepIter<'_> { | 98 | impl LineIndexStepIter<'_> { |
@@ -111,7 +111,7 @@ impl Iterator for LineIndexStepIter<'_> { | |||
111 | .as_mut() | 111 | .as_mut() |
112 | .and_then(|(newline, x)| { | 112 | .and_then(|(newline, x)| { |
113 | let x = x.next()?; | 113 | let x = x.next()?; |
114 | Some(Step::Utf16Char(TextRange::from_to(*newline + x.start, *newline + x.end))) | 114 | Some(Step::Utf16Char(TextRange::new(*newline + x.start, *newline + x.end))) |
115 | }) | 115 | }) |
116 | .or_else(|| { | 116 | .or_else(|| { |
117 | let next_newline = *self.line_index.newlines.get(self.next_newline_idx)?; | 117 | let next_newline = *self.line_index.newlines.get(self.next_newline_idx)?; |
@@ -129,7 +129,7 @@ impl Iterator for LineIndexStepIter<'_> { | |||
129 | #[derive(Debug)] | 129 | #[derive(Debug)] |
130 | struct OffsetStepIter<'a> { | 130 | struct OffsetStepIter<'a> { |
131 | text: &'a str, | 131 | text: &'a str, |
132 | offset: TextUnit, | 132 | offset: TextSize, |
133 | } | 133 | } |
134 | 134 | ||
135 | impl Iterator for OffsetStepIter<'_> { | 135 | impl Iterator for OffsetStepIter<'_> { |
@@ -140,15 +140,15 @@ impl Iterator for OffsetStepIter<'_> { | |||
140 | .char_indices() | 140 | .char_indices() |
141 | .filter_map(|(i, c)| { | 141 | .filter_map(|(i, c)| { |
142 | if c == '\n' { | 142 | if c == '\n' { |
143 | let next_offset = self.offset + TextUnit::from_usize(i + 1); | 143 | let next_offset = self.offset + TextSize::from_usize(i + 1); |
144 | let next = Step::Newline(next_offset); | 144 | let next = Step::Newline(next_offset); |
145 | Some((next, next_offset)) | 145 | Some((next, next_offset)) |
146 | } else { | 146 | } else { |
147 | let char_len = TextUnit::of_char(c); | 147 | let char_len = TextSize::of(c); |
148 | if char_len > TextUnit::from_usize(1) { | 148 | if char_len > TextSize::from_usize(1) { |
149 | let start = self.offset + TextUnit::from_usize(i); | 149 | let start = self.offset + TextSize::from_usize(i); |
150 | let end = start + char_len; | 150 | let end = start + char_len; |
151 | let next = Step::Utf16Char(TextRange::from_to(start, end)); | 151 | let next = Step::Utf16Char(TextRange::new(start, end)); |
152 | let next_offset = end; | 152 | let next_offset = end; |
153 | Some((next, next_offset)) | 153 | Some((next, next_offset)) |
154 | } else { | 154 | } else { |
@@ -157,7 +157,7 @@ impl Iterator for OffsetStepIter<'_> { | |||
157 | } | 157 | } |
158 | }) | 158 | }) |
159 | .next()?; | 159 | .next()?; |
160 | let next_idx = (next_offset - self.offset).to_usize(); | 160 | let next_idx: usize = (next_offset - self.offset).into(); |
161 | self.text = &self.text[next_idx..]; | 161 | self.text = &self.text[next_idx..]; |
162 | self.offset = next_offset; | 162 | self.offset = next_offset; |
163 | Some(next) | 163 | Some(next) |
@@ -195,7 +195,7 @@ impl<'a> Edits<'a> { | |||
195 | match self.edits.split_first() { | 195 | match self.edits.split_first() { |
196 | Some((next, rest)) => { | 196 | Some((next, rest)) => { |
197 | let delete = self.translate_range(next.delete); | 197 | let delete = self.translate_range(next.delete); |
198 | let diff = next.insert.len() as i64 - next.delete.len().to_usize() as i64; | 198 | let diff = next.insert.len() as i64 - usize::from(next.delete.len()) as i64; |
199 | self.current = Some(TranslatedEdit { delete, insert: &next.insert, diff }); | 199 | self.current = Some(TranslatedEdit { delete, insert: &next.insert, diff }); |
200 | self.edits = rest; | 200 | self.edits = rest; |
201 | } | 201 | } |
@@ -244,15 +244,15 @@ impl<'a> Edits<'a> { | |||
244 | } else { | 244 | } else { |
245 | let start = self.translate(range.start()); | 245 | let start = self.translate(range.start()); |
246 | let end = self.translate(range.end()); | 246 | let end = self.translate(range.end()); |
247 | TextRange::from_to(start, end) | 247 | TextRange::new(start, end) |
248 | } | 248 | } |
249 | } | 249 | } |
250 | 250 | ||
251 | fn translate(&self, x: TextUnit) -> TextUnit { | 251 | fn translate(&self, x: TextSize) -> TextSize { |
252 | if self.acc_diff == 0 { | 252 | if self.acc_diff == 0 { |
253 | x | 253 | x |
254 | } else { | 254 | } else { |
255 | TextUnit::from((x.to_usize() as i64 + self.acc_diff) as u32) | 255 | TextSize::from((usize::from(x) as i64 + self.acc_diff) as u32) |
256 | } | 256 | } |
257 | } | 257 | } |
258 | 258 | ||
@@ -271,29 +271,29 @@ impl<'a> Edits<'a> { | |||
271 | #[derive(Debug)] | 271 | #[derive(Debug)] |
272 | struct RunningLineCol { | 272 | struct RunningLineCol { |
273 | line: u32, | 273 | line: u32, |
274 | last_newline: TextUnit, | 274 | last_newline: TextSize, |
275 | col_adjust: TextUnit, | 275 | col_adjust: TextSize, |
276 | } | 276 | } |
277 | 277 | ||
278 | impl RunningLineCol { | 278 | impl RunningLineCol { |
279 | fn new() -> RunningLineCol { | 279 | fn new() -> RunningLineCol { |
280 | RunningLineCol { line: 0, last_newline: TextUnit::from(0), col_adjust: TextUnit::from(0) } | 280 | RunningLineCol { line: 0, last_newline: TextSize::from(0), col_adjust: TextSize::from(0) } |
281 | } | 281 | } |
282 | 282 | ||
283 | fn to_line_col(&self, offset: TextUnit) -> LineCol { | 283 | fn to_line_col(&self, offset: TextSize) -> LineCol { |
284 | LineCol { | 284 | LineCol { |
285 | line: self.line, | 285 | line: self.line, |
286 | col_utf16: ((offset - self.last_newline) - self.col_adjust).into(), | 286 | col_utf16: ((offset - self.last_newline) - self.col_adjust).into(), |
287 | } | 287 | } |
288 | } | 288 | } |
289 | 289 | ||
290 | fn add_line(&mut self, newline: TextUnit) { | 290 | fn add_line(&mut self, newline: TextSize) { |
291 | self.line += 1; | 291 | self.line += 1; |
292 | self.last_newline = newline; | 292 | self.last_newline = newline; |
293 | self.col_adjust = TextUnit::from(0); | 293 | self.col_adjust = TextSize::from(0); |
294 | } | 294 | } |
295 | 295 | ||
296 | fn adjust_col(&mut self, range: TextRange) { | 296 | fn adjust_col(&mut self, range: TextRange) { |
297 | self.col_adjust += range.len() - TextUnit::from(1); | 297 | self.col_adjust += range.len() - TextSize::from(1); |
298 | } | 298 | } |
299 | } | 299 | } |
diff --git a/crates/ra_ide_db/src/search.rs b/crates/ra_ide_db/src/search.rs index 1bf014149..c66de4f42 100644 --- a/crates/ra_ide_db/src/search.rs +++ b/crates/ra_ide_db/src/search.rs | |||
@@ -10,7 +10,7 @@ use hir::{DefWithBody, HasSource, Module, ModuleSource, Semantics, Visibility}; | |||
10 | use once_cell::unsync::Lazy; | 10 | use once_cell::unsync::Lazy; |
11 | use ra_db::{FileId, FileRange, SourceDatabaseExt}; | 11 | use ra_db::{FileId, FileRange, SourceDatabaseExt}; |
12 | use ra_prof::profile; | 12 | use ra_prof::profile; |
13 | use ra_syntax::{ast, match_ast, AstNode, TextRange, TextUnit}; | 13 | use ra_syntax::{ast, match_ast, AstNode, TextRange, TextSize}; |
14 | use rustc_hash::FxHashMap; | 14 | use rustc_hash::FxHashMap; |
15 | use test_utils::tested_by; | 15 | use test_utils::tested_by; |
16 | 16 | ||
@@ -85,7 +85,7 @@ impl SearchScope { | |||
85 | match (r1, r2) { | 85 | match (r1, r2) { |
86 | (None, r) | (r, None) => Some(r), | 86 | (None, r) | (r, None) => Some(r), |
87 | (Some(r1), Some(r2)) => { | 87 | (Some(r1), Some(r2)) => { |
88 | let r = r1.intersection(&r2)?; | 88 | let r = r1.intersect(r2)?; |
89 | Some(Some(r)) | 89 | Some(Some(r)) |
90 | } | 90 | } |
91 | } | 91 | } |
@@ -200,14 +200,13 @@ impl Definition { | |||
200 | 200 | ||
201 | for (file_id, search_range) in search_scope { | 201 | for (file_id, search_range) in search_scope { |
202 | let text = db.file_text(file_id); | 202 | let text = db.file_text(file_id); |
203 | let search_range = | 203 | let search_range = search_range.unwrap_or(TextRange::up_to(TextSize::of(&text))); |
204 | search_range.unwrap_or(TextRange::offset_len(0.into(), TextUnit::of_str(&text))); | ||
205 | 204 | ||
206 | let sema = Semantics::new(db); | 205 | let sema = Semantics::new(db); |
207 | let tree = Lazy::new(|| sema.parse(file_id).syntax().clone()); | 206 | let tree = Lazy::new(|| sema.parse(file_id).syntax().clone()); |
208 | 207 | ||
209 | for (idx, _) in text.match_indices(pat) { | 208 | for (idx, _) in text.match_indices(pat) { |
210 | let offset = TextUnit::from_usize(idx); | 209 | let offset = TextSize::from_usize(idx); |
211 | if !search_range.contains_inclusive(offset) { | 210 | if !search_range.contains_inclusive(offset) { |
212 | tested_by!(search_filters_by_range; force); | 211 | tested_by!(search_filters_by_range; force); |
213 | continue; | 212 | continue; |
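search.rs uses the new `TextRange::up_to` helper: instead of building `offset_len(0.into(), TextUnit::of_str(&text))`, the default search range is simply the range covering the whole file text. A sketch, assuming the `text_size` crate:

    use text_size::{TextRange, TextSize};

    fn main() {
        let text = "fn main() {}";

        // old: TextRange::offset_len(0.into(), TextUnit::of_str(&text))
        let whole_file = TextRange::up_to(TextSize::of(text));
        assert_eq!(whole_file, TextRange::new(0.into(), TextSize::of(text)));

        // contains_inclusive accepts the end-of-file offset as well.
        assert!(whole_file.contains_inclusive(TextSize::of(text)));
    }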
diff --git a/crates/ra_mbe/src/syntax_bridge.rs b/crates/ra_mbe/src/syntax_bridge.rs index 2b4390eb2..fa9787266 100644 --- a/crates/ra_mbe/src/syntax_bridge.rs +++ b/crates/ra_mbe/src/syntax_bridge.rs | |||
@@ -5,7 +5,7 @@ use ra_syntax::{ | |||
5 | ast::{self, make::tokens::doc_comment}, | 5 | ast::{self, make::tokens::doc_comment}, |
6 | tokenize, AstToken, Parse, SmolStr, SyntaxKind, | 6 | tokenize, AstToken, Parse, SmolStr, SyntaxKind, |
7 | SyntaxKind::*, | 7 | SyntaxKind::*, |
8 | SyntaxNode, SyntaxToken, SyntaxTreeBuilder, TextRange, TextUnit, Token as RawToken, T, | 8 | SyntaxNode, SyntaxToken, SyntaxTreeBuilder, TextRange, TextSize, Token as RawToken, T, |
9 | }; | 9 | }; |
10 | use rustc_hash::FxHashMap; | 10 | use rustc_hash::FxHashMap; |
11 | use tt::buffer::{Cursor, TokenBuffer}; | 11 | use tt::buffer::{Cursor, TokenBuffer}; |
@@ -99,11 +99,11 @@ pub fn parse_to_token_tree(text: &str) -> Option<(tt::Subtree, TokenMap)> { | |||
99 | 99 | ||
100 | let mut conv = RawConvertor { | 100 | let mut conv = RawConvertor { |
101 | text, | 101 | text, |
102 | offset: TextUnit::default(), | 102 | offset: TextSize::default(), |
103 | inner: tokens.iter(), | 103 | inner: tokens.iter(), |
104 | id_alloc: TokenIdAlloc { | 104 | id_alloc: TokenIdAlloc { |
105 | map: Default::default(), | 105 | map: Default::default(), |
106 | global_offset: TextUnit::default(), | 106 | global_offset: TextSize::default(), |
107 | next_id: 0, | 107 | next_id: 0, |
108 | }, | 108 | }, |
109 | }; | 109 | }; |
@@ -227,7 +227,7 @@ fn convert_doc_comment(token: &ra_syntax::SyntaxToken) -> Option<Vec<tt::TokenTr | |||
227 | 227 | ||
228 | struct TokenIdAlloc { | 228 | struct TokenIdAlloc { |
229 | map: TokenMap, | 229 | map: TokenMap, |
230 | global_offset: TextUnit, | 230 | global_offset: TextSize, |
231 | next_id: u32, | 231 | next_id: u32, |
232 | } | 232 | } |
233 | 233 | ||
@@ -266,7 +266,7 @@ impl TokenIdAlloc { | |||
266 | /// A Raw Token (straightly from lexer) convertor | 266 | /// A Raw Token (straightly from lexer) convertor |
267 | struct RawConvertor<'a> { | 267 | struct RawConvertor<'a> { |
268 | text: &'a str, | 268 | text: &'a str, |
269 | offset: TextUnit, | 269 | offset: TextSize, |
270 | id_alloc: TokenIdAlloc, | 270 | id_alloc: TokenIdAlloc, |
271 | inner: std::slice::Iter<'a, RawToken>, | 271 | inner: std::slice::Iter<'a, RawToken>, |
272 | } | 272 | } |
@@ -314,7 +314,7 @@ trait TokenConvertor { | |||
314 | } | 314 | } |
315 | 315 | ||
316 | result.push(if k.is_punct() { | 316 | result.push(if k.is_punct() { |
317 | assert_eq!(range.len().to_usize(), 1); | 317 | assert_eq!(range.len(), TextSize::of('.')); |
318 | let delim = match k { | 318 | let delim = match k { |
319 | T!['('] => Some((tt::DelimiterKind::Parenthesis, T![')'])), | 319 | T!['('] => Some((tt::DelimiterKind::Parenthesis, T![')'])), |
320 | T!['{'] => Some((tt::DelimiterKind::Brace, T!['}'])), | 320 | T!['{'] => Some((tt::DelimiterKind::Brace, T!['}'])), |
@@ -381,8 +381,8 @@ trait TokenConvertor { | |||
381 | k if k.is_keyword() => make_leaf!(Ident), | 381 | k if k.is_keyword() => make_leaf!(Ident), |
382 | k if k.is_literal() => make_leaf!(Literal), | 382 | k if k.is_literal() => make_leaf!(Literal), |
383 | LIFETIME => { | 383 | LIFETIME => { |
384 | let char_unit = TextUnit::from_usize(1); | 384 | let char_unit = TextSize::of('\''); |
385 | let r = TextRange::offset_len(range.start(), char_unit); | 385 | let r = TextRange::at(range.start(), char_unit); |
386 | let apostrophe = tt::Leaf::from(tt::Punct { | 386 | let apostrophe = tt::Leaf::from(tt::Punct { |
387 | char: '\'', | 387 | char: '\'', |
388 | spacing: tt::Spacing::Joint, | 388 | spacing: tt::Spacing::Joint, |
@@ -390,8 +390,7 @@ trait TokenConvertor { | |||
390 | }); | 390 | }); |
391 | result.push(apostrophe.into()); | 391 | result.push(apostrophe.into()); |
392 | 392 | ||
393 | let r = | 393 | let r = TextRange::at(range.start() + char_unit, range.len() - char_unit); |
394 | TextRange::offset_len(range.start() + char_unit, range.len() - char_unit); | ||
395 | let ident = tt::Leaf::from(tt::Ident { | 394 | let ident = tt::Leaf::from(tt::Ident { |
396 | text: SmolStr::new(&token.to_text()[1..]), | 395 | text: SmolStr::new(&token.to_text()[1..]), |
397 | id: self.id_alloc().alloc(r), | 396 | id: self.id_alloc().alloc(r), |
@@ -440,7 +439,7 @@ impl<'a> TokenConvertor for RawConvertor<'a> { | |||
440 | 439 | ||
441 | fn bump(&mut self) -> Option<(Self::Token, TextRange)> { | 440 | fn bump(&mut self) -> Option<(Self::Token, TextRange)> { |
442 | let token = self.inner.next()?; | 441 | let token = self.inner.next()?; |
443 | let range = TextRange::offset_len(self.offset, token.len); | 442 | let range = TextRange::at(self.offset, token.len); |
444 | self.offset += token.len; | 443 | self.offset += token.len; |
445 | 444 | ||
446 | Some(((*token, &self.text[range]), range)) | 445 | Some(((*token, &self.text[range]), range)) |
@@ -450,7 +449,7 @@ impl<'a> TokenConvertor for RawConvertor<'a> { | |||
450 | let token = self.inner.as_slice().get(0).cloned(); | 449 | let token = self.inner.as_slice().get(0).cloned(); |
451 | 450 | ||
452 | token.map(|it| { | 451 | token.map(|it| { |
453 | let range = TextRange::offset_len(self.offset, it.len); | 452 | let range = TextRange::at(self.offset, it.len); |
454 | (it, &self.text[range]) | 453 | (it, &self.text[range]) |
455 | }) | 454 | }) |
456 | } | 455 | } |
@@ -464,11 +463,11 @@ struct Convertor { | |||
464 | id_alloc: TokenIdAlloc, | 463 | id_alloc: TokenIdAlloc, |
465 | current: Option<SyntaxToken>, | 464 | current: Option<SyntaxToken>, |
466 | range: TextRange, | 465 | range: TextRange, |
467 | punct_offset: Option<(SyntaxToken, TextUnit)>, | 466 | punct_offset: Option<(SyntaxToken, TextSize)>, |
468 | } | 467 | } |
469 | 468 | ||
470 | impl Convertor { | 469 | impl Convertor { |
471 | fn new(node: &SyntaxNode, global_offset: TextUnit) -> Convertor { | 470 | fn new(node: &SyntaxNode, global_offset: TextSize) -> Convertor { |
472 | Convertor { | 471 | Convertor { |
473 | id_alloc: { TokenIdAlloc { map: TokenMap::default(), global_offset, next_id: 0 } }, | 472 | id_alloc: { TokenIdAlloc { map: TokenMap::default(), global_offset, next_id: 0 } }, |
474 | current: node.first_token(), | 473 | current: node.first_token(), |
@@ -481,7 +480,7 @@ impl Convertor { | |||
481 | #[derive(Debug)] | 480 | #[derive(Debug)] |
482 | enum SynToken { | 481 | enum SynToken { |
483 | Ordiniary(SyntaxToken), | 482 | Ordiniary(SyntaxToken), |
484 | Punch(SyntaxToken, TextUnit), | 483 | Punch(SyntaxToken, TextSize), |
485 | } | 484 | } |
486 | 485 | ||
487 | impl SynToken { | 486 | impl SynToken { |
@@ -500,7 +499,7 @@ impl SrcToken for SynToken { | |||
500 | fn to_char(&self) -> Option<char> { | 499 | fn to_char(&self) -> Option<char> { |
501 | match self { | 500 | match self { |
502 | SynToken::Ordiniary(_) => None, | 501 | SynToken::Ordiniary(_) => None, |
503 | SynToken::Punch(it, i) => it.text().chars().nth(i.to_usize()), | 502 | SynToken::Punch(it, i) => it.text().chars().nth((*i).into()), |
504 | } | 503 | } |
505 | } | 504 | } |
506 | fn to_text(&self) -> SmolStr { | 505 | fn to_text(&self) -> SmolStr { |
@@ -516,26 +515,26 @@ impl TokenConvertor for Convertor { | |||
516 | 515 | ||
517 | fn bump(&mut self) -> Option<(Self::Token, TextRange)> { | 516 | fn bump(&mut self) -> Option<(Self::Token, TextRange)> { |
518 | if let Some((punct, offset)) = self.punct_offset.clone() { | 517 | if let Some((punct, offset)) = self.punct_offset.clone() { |
519 | if offset.to_usize() + 1 < punct.text().len() { | 518 | if usize::from(offset) + 1 < punct.text().len() { |
520 | let offset = offset + TextUnit::from_usize(1); | 519 | let offset = offset + TextSize::from_usize(1); |
521 | let range = punct.text_range(); | 520 | let range = punct.text_range(); |
522 | self.punct_offset = Some((punct.clone(), offset)); | 521 | self.punct_offset = Some((punct.clone(), offset)); |
523 | let range = TextRange::offset_len(range.start() + offset, TextUnit::from_usize(1)); | 522 | let range = TextRange::at(range.start() + offset, TextSize::of('.')); |
524 | return Some((SynToken::Punch(punct, offset), range)); | 523 | return Some((SynToken::Punch(punct, offset), range)); |
525 | } | 524 | } |
526 | } | 525 | } |
527 | 526 | ||
528 | let curr = self.current.clone()?; | 527 | let curr = self.current.clone()?; |
529 | if !curr.text_range().is_subrange(&self.range) { | 528 | if !&self.range.contains_range(curr.text_range()) { |
530 | return None; | 529 | return None; |
531 | } | 530 | } |
532 | self.current = curr.next_token(); | 531 | self.current = curr.next_token(); |
533 | 532 | ||
534 | let token = if curr.kind().is_punct() { | 533 | let token = if curr.kind().is_punct() { |
535 | let range = curr.text_range(); | 534 | let range = curr.text_range(); |
536 | let range = TextRange::offset_len(range.start(), TextUnit::from_usize(1)); | 535 | let range = TextRange::at(range.start(), TextSize::from_usize(1)); |
537 | self.punct_offset = Some((curr.clone(), TextUnit::from_usize(0))); | 536 | self.punct_offset = Some((curr.clone(), TextSize::from_usize(0))); |
538 | (SynToken::Punch(curr, TextUnit::from_usize(0)), range) | 537 | (SynToken::Punch(curr, TextSize::from_usize(0)), range) |
539 | } else { | 538 | } else { |
540 | self.punct_offset = None; | 539 | self.punct_offset = None; |
541 | let range = curr.text_range(); | 540 | let range = curr.text_range(); |
@@ -547,19 +546,19 @@ impl TokenConvertor for Convertor { | |||
547 | 546 | ||
548 | fn peek(&self) -> Option<Self::Token> { | 547 | fn peek(&self) -> Option<Self::Token> { |
549 | if let Some((punct, mut offset)) = self.punct_offset.clone() { | 548 | if let Some((punct, mut offset)) = self.punct_offset.clone() { |
550 | offset = offset + TextUnit::from_usize(1); | 549 | offset = offset + TextSize::from_usize(1); |
551 | if offset.to_usize() < punct.text().len() { | 550 | if usize::from(offset) < punct.text().len() { |
552 | return Some(SynToken::Punch(punct, offset)); | 551 | return Some(SynToken::Punch(punct, offset)); |
553 | } | 552 | } |
554 | } | 553 | } |
555 | 554 | ||
556 | let curr = self.current.clone()?; | 555 | let curr = self.current.clone()?; |
557 | if !curr.text_range().is_subrange(&self.range) { | 556 | if !self.range.contains_range(curr.text_range()) { |
558 | return None; | 557 | return None; |
559 | } | 558 | } |
560 | 559 | ||
561 | let token = if curr.kind().is_punct() { | 560 | let token = if curr.kind().is_punct() { |
562 | SynToken::Punch(curr, TextUnit::from_usize(0)) | 561 | SynToken::Punch(curr, TextSize::from_usize(0)) |
563 | } else { | 562 | } else { |
564 | SynToken::Ordiniary(curr) | 563 | SynToken::Ordiniary(curr) |
565 | }; | 564 | }; |
@@ -574,8 +573,8 @@ impl TokenConvertor for Convertor { | |||
574 | struct TtTreeSink<'a> { | 573 | struct TtTreeSink<'a> { |
575 | buf: String, | 574 | buf: String, |
576 | cursor: Cursor<'a>, | 575 | cursor: Cursor<'a>, |
577 | open_delims: FxHashMap<tt::TokenId, TextUnit>, | 576 | open_delims: FxHashMap<tt::TokenId, TextSize>, |
578 | text_pos: TextUnit, | 577 | text_pos: TextSize, |
579 | inner: SyntaxTreeBuilder, | 578 | inner: SyntaxTreeBuilder, |
580 | token_map: TokenMap, | 579 | token_map: TokenMap, |
581 | 580 | ||
@@ -641,7 +640,7 @@ impl<'a> TreeSink for TtTreeSink<'a> { | |||
641 | } | 640 | } |
642 | tt::Leaf::Literal(lit) => (lit.text.clone(), lit.id), | 641 | tt::Leaf::Literal(lit) => (lit.text.clone(), lit.id), |
643 | }; | 642 | }; |
644 | let range = TextRange::offset_len(self.text_pos, TextUnit::of_str(&text)); | 643 | let range = TextRange::at(self.text_pos, TextSize::of(text.as_str())); |
645 | self.token_map.insert(id, range); | 644 | self.token_map.insert(id, range); |
646 | self.cursor = self.cursor.bump(); | 645 | self.cursor = self.cursor.bump(); |
647 | text | 646 | text |
@@ -658,10 +657,8 @@ impl<'a> TreeSink for TtTreeSink<'a> { | |||
658 | self.cursor = self.cursor.bump(); | 657 | self.cursor = self.cursor.bump(); |
659 | if let Some(id) = parent.delimiter.map(|it| it.id) { | 658 | if let Some(id) = parent.delimiter.map(|it| it.id) { |
660 | if let Some(open_delim) = self.open_delims.get(&id) { | 659 | if let Some(open_delim) = self.open_delims.get(&id) { |
661 | let open_range = | 660 | let open_range = TextRange::at(*open_delim, TextSize::of('(')); |
662 | TextRange::offset_len(*open_delim, TextUnit::from_usize(1)); | 661 | let close_range = TextRange::at(self.text_pos, TextSize::of('(')); |
663 | let close_range = | ||
664 | TextRange::offset_len(self.text_pos, TextUnit::from_usize(1)); | ||
665 | self.token_map.insert_delim(id, open_range, close_range); | 662 | self.token_map.insert_delim(id, open_range, close_range); |
666 | } | 663 | } |
667 | } | 664 | } |
@@ -672,7 +669,7 @@ impl<'a> TreeSink for TtTreeSink<'a> { | |||
672 | } | 669 | } |
673 | }; | 670 | }; |
674 | self.buf += &text; | 671 | self.buf += &text; |
675 | self.text_pos += TextUnit::of_str(&text); | 672 | self.text_pos += TextSize::of(text.as_str()); |
676 | } | 673 | } |
677 | 674 | ||
678 | let text = SmolStr::new(self.buf.as_str()); | 675 | let text = SmolStr::new(self.buf.as_str()); |
@@ -690,7 +687,7 @@ impl<'a> TreeSink for TtTreeSink<'a> { | |||
690 | // other parts of RA such that we don't add whitespace here. | 687 | // other parts of RA such that we don't add whitespace here. |
691 | if curr.spacing == tt::Spacing::Alone && curr.char != ';' { | 688 | if curr.spacing == tt::Spacing::Alone && curr.char != ';' { |
692 | self.inner.token(WHITESPACE, " ".into()); | 689 | self.inner.token(WHITESPACE, " ".into()); |
693 | self.text_pos += TextUnit::of_char(' '); | 690 | self.text_pos += TextSize::of(' '); |
694 | } | 691 | } |
695 | } | 692 | } |
696 | } | 693 | } |
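A minimal sketch, outside the patch, of the TextRange::at(offset, len) constructor and the TextSize::of('.') idiom the converter above uses when splitting a multi-character punct into single-character tokens; the `->` token and its starting offset are invented for the example.

use text_size::{TextRange, TextSize};

fn main() {
    // A two-character punctuation token starting at a hypothetical offset 10, e.g. `->`.
    let token_range = TextRange::at(TextSize::from_usize(10), TextSize::of("->"));

    // The converter hands out one character at a time; `TextSize::of('.')`
    // is just a readable way to spell a one-character length.
    let first = TextRange::at(token_range.start(), TextSize::of('.'));
    let second = TextRange::at(token_range.start() + TextSize::of('.'), TextSize::of('.'));

    assert_eq!(first, TextRange::new(TextSize::from_usize(10), TextSize::from_usize(11)));
    assert_eq!(second.end(), token_range.end());
}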
diff --git a/crates/ra_syntax/Cargo.toml b/crates/ra_syntax/Cargo.toml index 75a2f696e..dda396582 100644 --- a/crates/ra_syntax/Cargo.toml +++ b/crates/ra_syntax/Cargo.toml | |||
@@ -12,7 +12,7 @@ doctest = false | |||
12 | 12 | ||
13 | [dependencies] | 13 | [dependencies] |
14 | itertools = "0.9.0" | 14 | itertools = "0.9.0" |
15 | rowan = "0.9.1" | 15 | rowan = { path = "../../../rowan" } |
16 | rustc_lexer = { version = "652.0.0", package = "rustc-ap-rustc_lexer" } | 16 | rustc_lexer = { version = "652.0.0", package = "rustc-ap-rustc_lexer" } |
17 | rustc-hash = "1.1.0" | 17 | rustc-hash = "1.1.0" |
18 | arrayvec = "0.5.1" | 18 | arrayvec = "0.5.1" |
diff --git a/crates/ra_syntax/src/algo.rs b/crates/ra_syntax/src/algo.rs index 06df8495c..2a8dac757 100644 --- a/crates/ra_syntax/src/algo.rs +++ b/crates/ra_syntax/src/algo.rs | |||
@@ -11,7 +11,7 @@ use rustc_hash::FxHashMap; | |||
11 | 11 | ||
12 | use crate::{ | 12 | use crate::{ |
13 | AstNode, Direction, NodeOrToken, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxNodePtr, | 13 | AstNode, Direction, NodeOrToken, SyntaxElement, SyntaxKind, SyntaxNode, SyntaxNodePtr, |
14 | SyntaxToken, TextRange, TextUnit, | 14 | SyntaxToken, TextRange, TextSize, |
15 | }; | 15 | }; |
16 | 16 | ||
17 | /// Returns ancestors of the node at the offset, sorted by length. This should | 17 | /// Returns ancestors of the node at the offset, sorted by length. This should |
@@ -21,7 +21,7 @@ use crate::{ | |||
21 | /// t.parent().ancestors())`. | 21 | /// t.parent().ancestors())`. |
22 | pub fn ancestors_at_offset( | 22 | pub fn ancestors_at_offset( |
23 | node: &SyntaxNode, | 23 | node: &SyntaxNode, |
24 | offset: TextUnit, | 24 | offset: TextSize, |
25 | ) -> impl Iterator<Item = SyntaxNode> { | 25 | ) -> impl Iterator<Item = SyntaxNode> { |
26 | node.token_at_offset(offset) | 26 | node.token_at_offset(offset) |
27 | .map(|token| token.parent().ancestors()) | 27 | .map(|token| token.parent().ancestors()) |
@@ -37,7 +37,7 @@ pub fn ancestors_at_offset( | |||
37 | /// ``` | 37 | /// ``` |
38 | /// | 38 | /// |
39 | /// then the shorter node will be silently preferred. | 39 | /// then the shorter node will be silently preferred. |
40 | pub fn find_node_at_offset<N: AstNode>(syntax: &SyntaxNode, offset: TextUnit) -> Option<N> { | 40 | pub fn find_node_at_offset<N: AstNode>(syntax: &SyntaxNode, offset: TextSize) -> Option<N> { |
41 | ancestors_at_offset(syntax, offset).find_map(N::cast) | 41 | ancestors_at_offset(syntax, offset).find_map(N::cast) |
42 | } | 42 | } |
43 | 43 | ||
@@ -180,7 +180,7 @@ fn _insert_children( | |||
180 | position: InsertPosition<SyntaxElement>, | 180 | position: InsertPosition<SyntaxElement>, |
181 | to_insert: &mut dyn Iterator<Item = SyntaxElement>, | 181 | to_insert: &mut dyn Iterator<Item = SyntaxElement>, |
182 | ) -> SyntaxNode { | 182 | ) -> SyntaxNode { |
183 | let mut delta = TextUnit::default(); | 183 | let mut delta = TextSize::default(); |
184 | let to_insert = to_insert.map(|element| { | 184 | let to_insert = to_insert.map(|element| { |
185 | delta += element.text_range().len(); | 185 | delta += element.text_range().len(); |
186 | to_green_element(element) | 186 | to_green_element(element) |
@@ -347,7 +347,7 @@ fn with_children( | |||
347 | parent: &SyntaxNode, | 347 | parent: &SyntaxNode, |
348 | new_children: Vec<NodeOrToken<rowan::GreenNode, rowan::GreenToken>>, | 348 | new_children: Vec<NodeOrToken<rowan::GreenNode, rowan::GreenToken>>, |
349 | ) -> SyntaxNode { | 349 | ) -> SyntaxNode { |
350 | let len = new_children.iter().map(|it| it.text_len()).sum::<TextUnit>(); | 350 | let len = new_children.iter().map(|it| it.text_len()).sum::<TextSize>(); |
351 | let new_node = rowan::GreenNode::new(rowan::SyntaxKind(parent.kind() as u16), new_children); | 351 | let new_node = rowan::GreenNode::new(rowan::SyntaxKind(parent.kind() as u16), new_children); |
352 | let new_root_node = parent.replace_with(new_node); | 352 | let new_root_node = parent.replace_with(new_node); |
353 | let new_root_node = SyntaxNode::new_root(new_root_node); | 353 | let new_root_node = SyntaxNode::new_root(new_root_node); |
@@ -355,7 +355,7 @@ fn with_children( | |||
355 | // FIXME: use a more elegant way to re-fetch the node (#1185), make | 355 | // FIXME: use a more elegant way to re-fetch the node (#1185), make |
356 | // `range` private afterwards | 356 | // `range` private afterwards |
357 | let mut ptr = SyntaxNodePtr::new(parent); | 357 | let mut ptr = SyntaxNodePtr::new(parent); |
358 | ptr.range = TextRange::offset_len(ptr.range.start(), len); | 358 | ptr.range = TextRange::at(ptr.range.start(), len); |
359 | ptr.to_node(&new_root_node) | 359 | ptr.to_node(&new_root_node) |
360 | } | 360 | } |
361 | 361 | ||
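The sum::<TextSize>() calls in the hunk above rely on TextSize implementing iterator summation; a small sketch under that assumption, with made-up element texts.

use text_size::TextSize;

fn main() {
    // Lengths of three hypothetical inserted elements, summed the same way
    // the hunk sums `text_range().len()` of the children.
    let lens = [TextSize::of("fn "), TextSize::of("foo"), TextSize::of("()")];
    let total = lens.iter().copied().sum::<TextSize>();
    assert_eq!(usize::from(total), "fn foo()".len());
}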
diff --git a/crates/ra_syntax/src/ast/tokens.rs b/crates/ra_syntax/src/ast/tokens.rs index aa34b682d..26b8f9c36 100644 --- a/crates/ra_syntax/src/ast/tokens.rs +++ b/crates/ra_syntax/src/ast/tokens.rs | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | use crate::{ | 3 | use crate::{ |
4 | ast::{AstToken, Comment, RawString, String, Whitespace}, | 4 | ast::{AstToken, Comment, RawString, String, Whitespace}, |
5 | TextRange, TextUnit, | 5 | TextRange, TextSize, |
6 | }; | 6 | }; |
7 | 7 | ||
8 | impl Comment { | 8 | impl Comment { |
@@ -94,14 +94,14 @@ impl QuoteOffsets { | |||
94 | return None; | 94 | return None; |
95 | } | 95 | } |
96 | 96 | ||
97 | let start = TextUnit::from(0); | 97 | let start = TextSize::from(0); |
98 | let left_quote = TextUnit::from_usize(left_quote) + TextUnit::of_char('"'); | 98 | let left_quote = TextSize::from_usize(left_quote) + TextSize::of('"'); |
99 | let right_quote = TextUnit::from_usize(right_quote); | 99 | let right_quote = TextSize::from_usize(right_quote); |
100 | let end = TextUnit::of_str(literal); | 100 | let end = TextSize::of(literal); |
101 | 101 | ||
102 | let res = QuoteOffsets { | 102 | let res = QuoteOffsets { |
103 | quotes: [TextRange::from_to(start, left_quote), TextRange::from_to(right_quote, end)], | 103 | quotes: [TextRange::new(start, left_quote), TextRange::new(right_quote, end)], |
104 | contents: TextRange::from_to(left_quote, right_quote), | 104 | contents: TextRange::new(left_quote, right_quote), |
105 | }; | 105 | }; |
106 | Some(res) | 106 | Some(res) |
107 | } | 107 | } |
@@ -168,7 +168,7 @@ impl HasStringValue for RawString { | |||
168 | impl RawString { | 168 | impl RawString { |
169 | pub fn map_range_up(&self, range: TextRange) -> Option<TextRange> { | 169 | pub fn map_range_up(&self, range: TextRange) -> Option<TextRange> { |
170 | let contents_range = self.text_range_between_quotes()?; | 170 | let contents_range = self.text_range_between_quotes()?; |
171 | assert!(range.is_subrange(&TextRange::offset_len(0.into(), contents_range.len()))); | 171 | assert!(TextRange::up_to(contents_range.len()).contains_range(range)); |
172 | Some(range + contents_range.start()) | 172 | Some(range + contents_range.start()) |
173 | } | 173 | } |
174 | } | 174 | } |
@@ -459,7 +459,7 @@ pub trait HasFormatSpecifier: AstToken { | |||
459 | while let Some((r, Ok(next_char))) = chars.peek() { | 459 | while let Some((r, Ok(next_char))) = chars.peek() { |
460 | if next_char.is_ascii_digit() { | 460 | if next_char.is_ascii_digit() { |
461 | chars.next(); | 461 | chars.next(); |
462 | range = range.extend_to(r); | 462 | range = range.cover(*r); |
463 | } else { | 463 | } else { |
464 | break; | 464 | break; |
465 | } | 465 | } |
@@ -477,7 +477,7 @@ pub trait HasFormatSpecifier: AstToken { | |||
477 | while let Some((r, Ok(next_char))) = chars.peek() { | 477 | while let Some((r, Ok(next_char))) = chars.peek() { |
478 | if *next_char == '_' || next_char.is_ascii_digit() || next_char.is_alphabetic() { | 478 | if *next_char == '_' || next_char.is_ascii_digit() || next_char.is_alphabetic() { |
479 | chars.next(); | 479 | chars.next(); |
480 | range = range.extend_to(r); | 480 | range = range.cover(*r); |
481 | } else { | 481 | } else { |
482 | break; | 482 | break; |
483 | } | 483 | } |
@@ -498,10 +498,8 @@ impl HasFormatSpecifier for String { | |||
498 | let mut res = Vec::with_capacity(text.len()); | 498 | let mut res = Vec::with_capacity(text.len()); |
499 | rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| { | 499 | rustc_lexer::unescape::unescape_str(text, &mut |range, unescaped_char| { |
500 | res.push(( | 500 | res.push(( |
501 | TextRange::from_to( | 501 | TextRange::new(TextSize::from_usize(range.start), TextSize::from_usize(range.end)) |
502 | TextUnit::from_usize(range.start), | 502 | + offset, |
503 | TextUnit::from_usize(range.end), | ||
504 | ) + offset, | ||
505 | unescaped_char, | 503 | unescaped_char, |
506 | )) | 504 | )) |
507 | }); | 505 | }); |
@@ -521,10 +519,8 @@ impl HasFormatSpecifier for RawString { | |||
521 | let mut res = Vec::with_capacity(text.len()); | 519 | let mut res = Vec::with_capacity(text.len()); |
522 | for (idx, c) in text.char_indices() { | 520 | for (idx, c) in text.char_indices() { |
523 | res.push(( | 521 | res.push(( |
524 | TextRange::from_to( | 522 | TextRange::new(TextSize::from_usize(idx), TextSize::from_usize(idx + c.len_utf8())) |
525 | TextUnit::from_usize(idx), | 523 | + offset, |
526 | TextUnit::from_usize(idx + c.len_utf8()), | ||
527 | ) + offset, | ||
528 | Ok(c), | 524 | Ok(c), |
529 | )); | 525 | )); |
530 | } | 526 | } |
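A sketch of the three helpers this file moves to (TextRange::up_to, contains_range, cover), assuming the semantics implied by the call sites above; note that contains_range flips the receiver compared to the old is_subrange, and the literal offsets are invented.

use text_size::{TextRange, TextSize};

fn main() {
    let contents_len = TextSize::of("hello");

    // Old: range.is_subrange(&TextRange::offset_len(0.into(), contents_len))
    // New: the containing range is the receiver.
    let inner = TextRange::new(TextSize::from_usize(1), TextSize::from_usize(4));
    assert!(TextRange::up_to(contents_len).contains_range(inner));

    // Old: range.extend_to(r)   New: range.cover(r), the smallest range spanning both.
    let a = TextRange::new(TextSize::from_usize(2), TextSize::from_usize(3));
    let b = TextRange::new(TextSize::from_usize(5), TextSize::from_usize(7));
    assert_eq!(a.cover(b), TextRange::new(TextSize::from_usize(2), TextSize::from_usize(7)));
}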
diff --git a/crates/ra_syntax/src/fuzz.rs b/crates/ra_syntax/src/fuzz.rs index 7012df7f0..15aad2205 100644 --- a/crates/ra_syntax/src/fuzz.rs +++ b/crates/ra_syntax/src/fuzz.rs | |||
@@ -1,6 +1,6 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! FIXME: write short doc here |
2 | 2 | ||
3 | use crate::{validation, AstNode, SourceFile, TextRange, TextUnit}; | 3 | use crate::{validation, AstNode, SourceFile, TextRange, TextSize}; |
4 | use ra_text_edit::AtomTextEdit; | 4 | use ra_text_edit::AtomTextEdit; |
5 | use std::str::{self, FromStr}; | 5 | use std::str::{self, FromStr}; |
6 | 6 | ||
@@ -34,10 +34,8 @@ impl CheckReparse { | |||
34 | let text = lines.collect::<Vec<_>>().join("\n"); | 34 | let text = lines.collect::<Vec<_>>().join("\n"); |
35 | let text = format!("{}{}{}", PREFIX, text, SUFFIX); | 35 | let text = format!("{}{}{}", PREFIX, text, SUFFIX); |
36 | text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range | 36 | text.get(delete_start..delete_start.checked_add(delete_len)?)?; // make sure delete is a valid range |
37 | let delete = TextRange::offset_len( | 37 | let delete = |
38 | TextUnit::from_usize(delete_start), | 38 | TextRange::at(TextSize::from_usize(delete_start), TextSize::from_usize(delete_len)); |
39 | TextUnit::from_usize(delete_len), | ||
40 | ); | ||
41 | let edited_text = | 39 | let edited_text = |
42 | format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]); | 40 | format!("{}{}{}", &text[..delete_start], &insert, &text[delete_start + delete_len..]); |
43 | let edit = AtomTextEdit { delete, insert }; | 41 | let edit = AtomTextEdit { delete, insert }; |
diff --git a/crates/ra_syntax/src/lib.rs b/crates/ra_syntax/src/lib.rs index a796e78b1..ceeb2bde9 100644 --- a/crates/ra_syntax/src/lib.rs +++ b/crates/ra_syntax/src/lib.rs | |||
@@ -55,7 +55,7 @@ pub use crate::{ | |||
55 | }, | 55 | }, |
56 | }; | 56 | }; |
57 | pub use ra_parser::{SyntaxKind, T}; | 57 | pub use ra_parser::{SyntaxKind, T}; |
58 | pub use rowan::{SmolStr, SyntaxText, TextRange, TextUnit, TokenAtOffset, WalkEvent}; | 58 | pub use rowan::{SmolStr, SyntaxText, TextRange, TextSize, TokenAtOffset, WalkEvent}; |
59 | 59 | ||
60 | /// `Parse` is the result of the parsing: a syntax tree and a collection of | 60 | /// `Parse` is the result of the parsing: a syntax tree and a collection of |
61 | /// errors. | 61 | /// errors. |
@@ -266,7 +266,7 @@ fn api_walkthrough() { | |||
266 | assert_eq!(expr_syntax.kind(), SyntaxKind::BIN_EXPR); | 266 | assert_eq!(expr_syntax.kind(), SyntaxKind::BIN_EXPR); |
267 | 267 | ||
268 | // And text range: | 268 | // And text range: |
269 | assert_eq!(expr_syntax.text_range(), TextRange::from_to(32.into(), 37.into())); | 269 | assert_eq!(expr_syntax.text_range(), TextRange::new(32.into(), 37.into())); |
270 | 270 | ||
271 | // You can get node's text as a `SyntaxText` object, which will traverse the | 271 | // You can get node's text as a `SyntaxText` object, which will traverse the |
272 | // tree collecting token's text: | 272 | // tree collecting token's text: |
diff --git a/crates/ra_syntax/src/parsing/lexer.rs b/crates/ra_syntax/src/parsing/lexer.rs index 67c1f1b48..1fdc76d98 100644 --- a/crates/ra_syntax/src/parsing/lexer.rs +++ b/crates/ra_syntax/src/parsing/lexer.rs | |||
@@ -4,7 +4,7 @@ | |||
4 | use crate::{ | 4 | use crate::{ |
5 | SyntaxError, | 5 | SyntaxError, |
6 | SyntaxKind::{self, *}, | 6 | SyntaxKind::{self, *}, |
7 | TextRange, TextUnit, T, | 7 | TextRange, TextSize, T, |
8 | }; | 8 | }; |
9 | 9 | ||
10 | /// A token of Rust source. | 10 | /// A token of Rust source. |
@@ -13,7 +13,7 @@ pub struct Token { | |||
13 | /// The kind of token. | 13 | /// The kind of token. |
14 | pub kind: SyntaxKind, | 14 | pub kind: SyntaxKind, |
15 | /// The length of the token. | 15 | /// The length of the token. |
16 | pub len: TextUnit, | 16 | pub len: TextSize, |
17 | } | 17 | } |
18 | 18 | ||
19 | /// Break a string up into its component tokens. | 19 | /// Break a string up into its component tokens. |
@@ -30,7 +30,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) { | |||
30 | 30 | ||
31 | let mut offset: usize = rustc_lexer::strip_shebang(text) | 31 | let mut offset: usize = rustc_lexer::strip_shebang(text) |
32 | .map(|shebang_len| { | 32 | .map(|shebang_len| { |
33 | tokens.push(Token { kind: SHEBANG, len: TextUnit::from_usize(shebang_len) }); | 33 | tokens.push(Token { kind: SHEBANG, len: TextSize::from_usize(shebang_len) }); |
34 | shebang_len | 34 | shebang_len |
35 | }) | 35 | }) |
36 | .unwrap_or(0); | 36 | .unwrap_or(0); |
@@ -38,8 +38,8 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) { | |||
38 | let text_without_shebang = &text[offset..]; | 38 | let text_without_shebang = &text[offset..]; |
39 | 39 | ||
40 | for rustc_token in rustc_lexer::tokenize(text_without_shebang) { | 40 | for rustc_token in rustc_lexer::tokenize(text_without_shebang) { |
41 | let token_len = TextUnit::from_usize(rustc_token.len); | 41 | let token_len = TextSize::from_usize(rustc_token.len); |
42 | let token_range = TextRange::offset_len(TextUnit::from_usize(offset), token_len); | 42 | let token_range = TextRange::at(TextSize::from_usize(offset), token_len); |
43 | 43 | ||
44 | let (syntax_kind, err_message) = | 44 | let (syntax_kind, err_message) = |
45 | rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]); | 45 | rustc_token_kind_to_syntax_kind(&rustc_token.kind, &text[token_range]); |
@@ -65,7 +65,7 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) { | |||
65 | /// Beware that unescape errors are not checked at tokenization time. | 65 | /// Beware that unescape errors are not checked at tokenization time. |
66 | pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> { | 66 | pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> { |
67 | lex_first_token(text) | 67 | lex_first_token(text) |
68 | .filter(|(token, _)| token.len == TextUnit::of_str(text)) | 68 | .filter(|(token, _)| token.len == TextSize::of(text)) |
69 | .map(|(token, error)| (token.kind, error)) | 69 | .map(|(token, error)| (token.kind, error)) |
70 | } | 70 | } |
71 | 71 | ||
@@ -75,7 +75,7 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxEr | |||
75 | /// Beware that unescape errors are not checked at tokenization time. | 75 | /// Beware that unescape errors are not checked at tokenization time. |
76 | pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> { | 76 | pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> { |
77 | lex_first_token(text) | 77 | lex_first_token(text) |
78 | .filter(|(token, error)| !error.is_some() && token.len == TextUnit::of_str(text)) | 78 | .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text)) |
79 | .map(|(token, _error)| token.kind) | 79 | .map(|(token, _error)| token.kind) |
80 | } | 80 | } |
81 | 81 | ||
@@ -96,9 +96,9 @@ fn lex_first_token(text: &str) -> Option<(Token, Option<SyntaxError>)> { | |||
96 | let rustc_token = rustc_lexer::first_token(text); | 96 | let rustc_token = rustc_lexer::first_token(text); |
97 | let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text); | 97 | let (syntax_kind, err_message) = rustc_token_kind_to_syntax_kind(&rustc_token.kind, text); |
98 | 98 | ||
99 | let token = Token { kind: syntax_kind, len: TextUnit::from_usize(rustc_token.len) }; | 99 | let token = Token { kind: syntax_kind, len: TextSize::from_usize(rustc_token.len) }; |
100 | let optional_error = err_message.map(|err_message| { | 100 | let optional_error = err_message.map(|err_message| { |
101 | SyntaxError::new(err_message, TextRange::from_to(0.into(), TextUnit::of_str(text))) | 101 | SyntaxError::new(err_message, TextRange::new(0.into(), TextSize::of(text))) |
102 | }); | 102 | }); |
103 | 103 | ||
104 | Some((token, optional_error)) | 104 | Some((token, optional_error)) |
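The lexer loop above keeps a running TextSize offset and slices the source with TextRange::at; a self-contained sketch of that shape, with hypothetical token lengths standing in for rustc_lexer output.

use text_size::{TextRange, TextSize};

fn main() {
    let text = "let x = 92;";
    // Hypothetical token lengths; the real ones come from `rustc_lexer`.
    let token_lens = [3usize, 1, 1, 1, 1, 1, 2, 1];

    let mut offset = TextSize::from_usize(0);
    for &len in token_lens.iter() {
        let len = TextSize::from_usize(len);
        let range = TextRange::at(offset, len);
        // Slicing a `&str` by `TextRange`, as the hunk does with `&text[token_range]`.
        println!("{:?} {:?}", range, &text[range]);
        offset += len;
    }
    assert_eq!(offset, TextSize::of(text));
}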
diff --git a/crates/ra_syntax/src/parsing/reparsing.rs b/crates/ra_syntax/src/parsing/reparsing.rs index 2d65b91f1..ffff0a7b2 100644 --- a/crates/ra_syntax/src/parsing/reparsing.rs +++ b/crates/ra_syntax/src/parsing/reparsing.rs | |||
@@ -19,7 +19,7 @@ use crate::{ | |||
19 | syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode}, | 19 | syntax_node::{GreenNode, GreenToken, NodeOrToken, SyntaxElement, SyntaxNode}, |
20 | SyntaxError, | 20 | SyntaxError, |
21 | SyntaxKind::*, | 21 | SyntaxKind::*, |
22 | TextRange, TextUnit, T, | 22 | TextRange, TextSize, T, |
23 | }; | 23 | }; |
24 | 24 | ||
25 | pub(crate) fn incremental_reparse( | 25 | pub(crate) fn incremental_reparse( |
@@ -176,7 +176,7 @@ fn merge_errors( | |||
176 | if old_err_range.end() <= range_before_reparse.start() { | 176 | if old_err_range.end() <= range_before_reparse.start() { |
177 | res.push(old_err); | 177 | res.push(old_err); |
178 | } else if old_err_range.start() >= range_before_reparse.end() { | 178 | } else if old_err_range.start() >= range_before_reparse.end() { |
179 | let inserted_len = TextUnit::of_str(&edit.insert); | 179 | let inserted_len = TextSize::of(&edit.insert); |
180 | res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len())); | 180 | res.push(old_err.with_range((old_err_range + inserted_len) - edit.delete.len())); |
181 | // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug) | 181 | // Note: extra parens are intentional to prevent uint underflow, HWAB (here was a bug) |
182 | } | 182 | } |
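A sketch of the range arithmetic in merge_errors above, assuming TextRange + TextSize and TextRange - TextSize shift both endpoints as the call site implies; the concrete offsets and lengths are invented.

use text_size::{TextRange, TextSize};

fn main() {
    // An error that sits after the edited region.
    let old_err_range = TextRange::new(TextSize::from_usize(20), TextSize::from_usize(24));
    let deleted = TextRange::new(TextSize::from_usize(5), TextSize::from_usize(8)); // 3 bytes removed
    let inserted_len = TextSize::of("abcdef"); // 6 bytes inserted

    // Same shape as the hunk: add first, then subtract, so the intermediate
    // value never underflows.
    let new_range = (old_err_range + inserted_len) - deleted.len();
    assert_eq!(new_range, TextRange::new(TextSize::from_usize(23), TextSize::from_usize(27)));
}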
diff --git a/crates/ra_syntax/src/parsing/text_token_source.rs b/crates/ra_syntax/src/parsing/text_token_source.rs index e2433913c..7ddc2c2c3 100644 --- a/crates/ra_syntax/src/parsing/text_token_source.rs +++ b/crates/ra_syntax/src/parsing/text_token_source.rs | |||
@@ -3,7 +3,7 @@ | |||
3 | use ra_parser::Token as PToken; | 3 | use ra_parser::Token as PToken; |
4 | use ra_parser::TokenSource; | 4 | use ra_parser::TokenSource; |
5 | 5 | ||
6 | use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextUnit}; | 6 | use crate::{parsing::lexer::Token, SyntaxKind::EOF, TextRange, TextSize}; |
7 | 7 | ||
8 | pub(crate) struct TextTokenSource<'t> { | 8 | pub(crate) struct TextTokenSource<'t> { |
9 | text: &'t str, | 9 | text: &'t str, |
@@ -15,7 +15,7 @@ pub(crate) struct TextTokenSource<'t> { | |||
15 | /// 0 7 10 | 15 | /// 0 7 10 |
16 | /// ``` | 16 | /// ``` |
17 | /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]` | 17 | /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]` |
18 | start_offsets: Vec<TextUnit>, | 18 | start_offsets: Vec<TextSize>, |
19 | /// non-whitespace/comment tokens | 19 | /// non-whitespace/comment tokens |
20 | /// ```non-rust | 20 | /// ```non-rust |
21 | /// struct Foo {} | 21 | /// struct Foo {} |
@@ -51,12 +51,12 @@ impl<'t> TokenSource for TextTokenSource<'t> { | |||
51 | if pos >= self.tokens.len() { | 51 | if pos >= self.tokens.len() { |
52 | return false; | 52 | return false; |
53 | } | 53 | } |
54 | let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len); | 54 | let range = TextRange::at(self.start_offsets[pos], self.tokens[pos].len); |
55 | self.text[range] == *kw | 55 | self.text[range] == *kw |
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | fn mk_token(pos: usize, start_offsets: &[TextUnit], tokens: &[Token]) -> PToken { | 59 | fn mk_token(pos: usize, start_offsets: &[TextSize], tokens: &[Token]) -> PToken { |
60 | let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF); | 60 | let kind = tokens.get(pos).map(|t| t.kind).unwrap_or(EOF); |
61 | let is_jointed_to_next = if pos + 1 < start_offsets.len() { | 61 | let is_jointed_to_next = if pos + 1 < start_offsets.len() { |
62 | start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1] | 62 | start_offsets[pos] + tokens[pos].len == start_offsets[pos + 1] |
diff --git a/crates/ra_syntax/src/parsing/text_tree_sink.rs b/crates/ra_syntax/src/parsing/text_tree_sink.rs index 87bb21cd9..22aed1db1 100644 --- a/crates/ra_syntax/src/parsing/text_tree_sink.rs +++ b/crates/ra_syntax/src/parsing/text_tree_sink.rs | |||
@@ -9,7 +9,7 @@ use crate::{ | |||
9 | syntax_node::GreenNode, | 9 | syntax_node::GreenNode, |
10 | SmolStr, SyntaxError, | 10 | SmolStr, SyntaxError, |
11 | SyntaxKind::{self, *}, | 11 | SyntaxKind::{self, *}, |
12 | SyntaxTreeBuilder, TextRange, TextUnit, | 12 | SyntaxTreeBuilder, TextRange, TextSize, |
13 | }; | 13 | }; |
14 | 14 | ||
15 | /// Bridges the parser with our specific syntax tree representation. | 15 | /// Bridges the parser with our specific syntax tree representation. |
@@ -18,7 +18,7 @@ use crate::{ | |||
18 | pub(crate) struct TextTreeSink<'a> { | 18 | pub(crate) struct TextTreeSink<'a> { |
19 | text: &'a str, | 19 | text: &'a str, |
20 | tokens: &'a [Token], | 20 | tokens: &'a [Token], |
21 | text_pos: TextUnit, | 21 | text_pos: TextSize, |
22 | token_pos: usize, | 22 | token_pos: usize, |
23 | state: State, | 23 | state: State, |
24 | inner: SyntaxTreeBuilder, | 24 | inner: SyntaxTreeBuilder, |
@@ -42,7 +42,7 @@ impl<'a> TreeSink for TextTreeSink<'a> { | |||
42 | let len = self.tokens[self.token_pos..self.token_pos + n_tokens] | 42 | let len = self.tokens[self.token_pos..self.token_pos + n_tokens] |
43 | .iter() | 43 | .iter() |
44 | .map(|it| it.len) | 44 | .map(|it| it.len) |
45 | .sum::<TextUnit>(); | 45 | .sum::<TextSize>(); |
46 | self.do_token(kind, len, n_tokens); | 46 | self.do_token(kind, len, n_tokens); |
47 | } | 47 | } |
48 | 48 | ||
@@ -62,12 +62,12 @@ impl<'a> TreeSink for TextTreeSink<'a> { | |||
62 | self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count(); | 62 | self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count(); |
63 | let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias]; | 63 | let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias]; |
64 | let mut trivia_end = | 64 | let mut trivia_end = |
65 | self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextUnit>(); | 65 | self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>(); |
66 | 66 | ||
67 | let n_attached_trivias = { | 67 | let n_attached_trivias = { |
68 | let leading_trivias = leading_trivias.iter().rev().map(|it| { | 68 | let leading_trivias = leading_trivias.iter().rev().map(|it| { |
69 | let next_end = trivia_end - it.len; | 69 | let next_end = trivia_end - it.len; |
70 | let range = TextRange::from_to(next_end, trivia_end); | 70 | let range = TextRange::new(next_end, trivia_end); |
71 | trivia_end = next_end; | 71 | trivia_end = next_end; |
72 | (it.kind, &self.text[range]) | 72 | (it.kind, &self.text[range]) |
73 | }); | 73 | }); |
@@ -132,8 +132,8 @@ impl<'a> TextTreeSink<'a> { | |||
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | fn do_token(&mut self, kind: SyntaxKind, len: TextUnit, n_tokens: usize) { | 135 | fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) { |
136 | let range = TextRange::offset_len(self.text_pos, len); | 136 | let range = TextRange::at(self.text_pos, len); |
137 | let text: SmolStr = self.text[range].into(); | 137 | let text: SmolStr = self.text[range].into(); |
138 | self.text_pos += len; | 138 | self.text_pos += len; |
139 | self.token_pos += n_tokens; | 139 | self.token_pos += n_tokens; |
diff --git a/crates/ra_syntax/src/ptr.rs b/crates/ra_syntax/src/ptr.rs index ecbfffcf4..62f03e93d 100644 --- a/crates/ra_syntax/src/ptr.rs +++ b/crates/ra_syntax/src/ptr.rs | |||
@@ -24,7 +24,7 @@ impl SyntaxNodePtr { | |||
24 | pub fn to_node(&self, root: &SyntaxNode) -> SyntaxNode { | 24 | pub fn to_node(&self, root: &SyntaxNode) -> SyntaxNode { |
25 | assert!(root.parent().is_none()); | 25 | assert!(root.parent().is_none()); |
26 | successors(Some(root.clone()), |node| { | 26 | successors(Some(root.clone()), |node| { |
27 | node.children().find(|it| self.range.is_subrange(&it.text_range())) | 27 | node.children().find(|it| it.text_range().contains_range(self.range)) |
28 | }) | 28 | }) |
29 | .find(|it| it.text_range() == self.range && it.kind() == self.kind) | 29 | .find(|it| it.text_range() == self.range && it.kind() == self.kind) |
30 | .unwrap_or_else(|| panic!("can't resolve local ptr to SyntaxNode: {:?}", self)) | 30 | .unwrap_or_else(|| panic!("can't resolve local ptr to SyntaxNode: {:?}", self)) |
diff --git a/crates/ra_syntax/src/syntax_error.rs b/crates/ra_syntax/src/syntax_error.rs index 54acf7847..7c4511fec 100644 --- a/crates/ra_syntax/src/syntax_error.rs +++ b/crates/ra_syntax/src/syntax_error.rs | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | use std::fmt; | 3 | use std::fmt; |
4 | 4 | ||
5 | use crate::{TextRange, TextUnit}; | 5 | use crate::{TextRange, TextSize}; |
6 | 6 | ||
7 | /// Represents the result of unsuccessful tokenization, parsing | 7 | /// Represents the result of unsuccessful tokenization, parsing |
8 | /// or tree validation. | 8 | /// or tree validation. |
@@ -23,8 +23,8 @@ impl SyntaxError { | |||
23 | pub fn new(message: impl Into<String>, range: TextRange) -> Self { | 23 | pub fn new(message: impl Into<String>, range: TextRange) -> Self { |
24 | Self(message.into(), range) | 24 | Self(message.into(), range) |
25 | } | 25 | } |
26 | pub fn new_at_offset(message: impl Into<String>, offset: TextUnit) -> Self { | 26 | pub fn new_at_offset(message: impl Into<String>, offset: TextSize) -> Self { |
27 | Self(message.into(), TextRange::offset_len(offset, 0.into())) | 27 | Self(message.into(), TextRange::empty(offset)) |
28 | } | 28 | } |
29 | 29 | ||
30 | pub fn range(&self) -> TextRange { | 30 | pub fn range(&self) -> TextRange { |
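A minimal sketch of the TextRange::empty(offset) constructor that replaces the old zero-length offset_len call; the offset is arbitrary.

use text_size::{TextRange, TextSize};

fn main() {
    let offset = TextSize::from_usize(7);

    // Old: TextRange::offset_len(offset, 0.into())
    // New: a dedicated constructor for the zero-length range at `offset`.
    let empty = TextRange::empty(offset);
    assert_eq!(empty.start(), empty.end());
    assert_eq!(empty, TextRange::new(offset, offset));
}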
diff --git a/crates/ra_syntax/src/syntax_node.rs b/crates/ra_syntax/src/syntax_node.rs index 4e3a1460d..f9d379abf 100644 --- a/crates/ra_syntax/src/syntax_node.rs +++ b/crates/ra_syntax/src/syntax_node.rs | |||
@@ -8,7 +8,7 @@ | |||
8 | 8 | ||
9 | use rowan::{GreenNodeBuilder, Language}; | 9 | use rowan::{GreenNodeBuilder, Language}; |
10 | 10 | ||
11 | use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextUnit}; | 11 | use crate::{Parse, SmolStr, SyntaxError, SyntaxKind, TextSize}; |
12 | 12 | ||
13 | pub(crate) use rowan::{GreenNode, GreenToken}; | 13 | pub(crate) use rowan::{GreenNode, GreenToken}; |
14 | 14 | ||
@@ -69,7 +69,7 @@ impl SyntaxTreeBuilder { | |||
69 | self.inner.finish_node() | 69 | self.inner.finish_node() |
70 | } | 70 | } |
71 | 71 | ||
72 | pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextUnit) { | 72 | pub fn error(&mut self, error: ra_parser::ParseError, text_pos: TextSize) { |
73 | self.errors.push(SyntaxError::new_at_offset(error.0, text_pos)) | 73 | self.errors.push(SyntaxError::new_at_offset(error.0, text_pos)) |
74 | } | 74 | } |
75 | } | 75 | } |
diff --git a/crates/ra_syntax/src/tests.rs b/crates/ra_syntax/src/tests.rs index 355843b94..4f2b67feb 100644 --- a/crates/ra_syntax/src/tests.rs +++ b/crates/ra_syntax/src/tests.rs | |||
@@ -5,7 +5,7 @@ use std::{ | |||
5 | 5 | ||
6 | use test_utils::{collect_rust_files, dir_tests, project_dir, read_text}; | 6 | use test_utils::{collect_rust_files, dir_tests, project_dir, read_text}; |
7 | 7 | ||
8 | use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextUnit, Token}; | 8 | use crate::{fuzz, tokenize, SourceFile, SyntaxError, TextRange, TextSize, Token}; |
9 | 9 | ||
10 | #[test] | 10 | #[test] |
11 | fn lexer_tests() { | 11 | fn lexer_tests() { |
@@ -121,12 +121,12 @@ fn assert_errors_are_absent(errors: &[SyntaxError], path: &Path) { | |||
121 | 121 | ||
122 | fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String { | 122 | fn dump_tokens_and_errors(tokens: &[Token], errors: &[SyntaxError], text: &str) -> String { |
123 | let mut acc = String::new(); | 123 | let mut acc = String::new(); |
124 | let mut offset = TextUnit::from_usize(0); | 124 | let mut offset = TextSize::from_usize(0); |
125 | for token in tokens { | 125 | for token in tokens { |
126 | let token_len = token.len; | 126 | let token_len = token.len; |
127 | let token_text = &text[TextRange::offset_len(offset, token.len)]; | 127 | let token_text = &text[TextRange::at(offset, token.len)]; |
128 | offset += token.len; | 128 | offset += token.len; |
129 | writeln!(acc, "{:?} {} {:?}", token.kind, token_len, token_text).unwrap(); | 129 | writeln!(acc, "{:?} {:?} {:?}", token.kind, token_len, token_text).unwrap(); |
130 | } | 130 | } |
131 | for err in errors { | 131 | for err in errors { |
132 | writeln!(acc, "> error{:?} token({:?}) msg({})", err.range(), &text[err.range()], err) | 132 | writeln!(acc, "> error{:?} token({:?}) msg({})", err.range(), &text[err.range()], err) |
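The dump above switches from {} to {:?} for the token length, which suggests TextSize is printed via Debug here; a sketch of both options, going through usize when a plain number is wanted. The exact Debug rendering is an assumption.

use text_size::TextSize;

fn main() {
    let len = TextSize::of("struct");
    // `{:?}` as in the updated dump.
    println!("{:?}", len);
    // Converting to `usize` recovers a plain number usable with `{}`.
    println!("{}", usize::from(len));
}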
diff --git a/crates/ra_syntax/src/validation.rs b/crates/ra_syntax/src/validation.rs index f85b3e61b..77d7e132d 100644 --- a/crates/ra_syntax/src/validation.rs +++ b/crates/ra_syntax/src/validation.rs | |||
@@ -7,7 +7,7 @@ use rustc_lexer::unescape; | |||
7 | use crate::{ | 7 | use crate::{ |
8 | ast, match_ast, AstNode, SyntaxError, | 8 | ast, match_ast, AstNode, SyntaxError, |
9 | SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF}, | 9 | SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF}, |
10 | SyntaxNode, SyntaxToken, TextUnit, T, | 10 | SyntaxNode, SyntaxToken, TextSize, T, |
11 | }; | 11 | }; |
12 | 12 | ||
13 | fn rustc_unescape_error_to_string(err: unescape::EscapeError) -> &'static str { | 13 | fn rustc_unescape_error_to_string(err: unescape::EscapeError) -> &'static str { |
@@ -112,7 +112,7 @@ fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) { | |||
112 | 112 | ||
113 | // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205) | 113 | // FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205) |
114 | let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| { | 114 | let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| { |
115 | let off = token.text_range().start() + TextUnit::from_usize(off + prefix_len); | 115 | let off = token.text_range().start() + TextSize::from_usize(off + prefix_len); |
116 | acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off)); | 116 | acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off)); |
117 | }; | 117 | }; |
118 | 118 | ||
diff --git a/crates/ra_text_edit/Cargo.toml b/crates/ra_text_edit/Cargo.toml index cae28389d..9b0567c98 100644 --- a/crates/ra_text_edit/Cargo.toml +++ b/crates/ra_text_edit/Cargo.toml | |||
@@ -9,5 +9,4 @@ publish = false | |||
9 | doctest = false | 9 | doctest = false |
10 | 10 | ||
11 | [dependencies] | 11 | [dependencies] |
12 | text_unit = "0.1.10" | 12 | text-size = { path = "../../../text-size" } |
13 | |||
diff --git a/crates/ra_text_edit/src/lib.rs b/crates/ra_text_edit/src/lib.rs index f6769e6a6..e656260c7 100644 --- a/crates/ra_text_edit/src/lib.rs +++ b/crates/ra_text_edit/src/lib.rs | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | mod text_edit; | 3 | mod text_edit; |
4 | 4 | ||
5 | use text_unit::{TextRange, TextUnit}; | 5 | use text_size::{TextRange, TextSize}; |
6 | 6 | ||
7 | pub use crate::text_edit::{TextEdit, TextEditBuilder}; | 7 | pub use crate::text_edit::{TextEdit, TextEditBuilder}; |
8 | 8 | ||
@@ -23,13 +23,13 @@ impl AtomTextEdit { | |||
23 | AtomTextEdit::replace(range, String::new()) | 23 | AtomTextEdit::replace(range, String::new()) |
24 | } | 24 | } |
25 | 25 | ||
26 | pub fn insert(offset: TextUnit, text: String) -> AtomTextEdit { | 26 | pub fn insert(offset: TextSize, text: String) -> AtomTextEdit { |
27 | AtomTextEdit::replace(TextRange::offset_len(offset, 0.into()), text) | 27 | AtomTextEdit::replace(TextRange::empty(offset), text) |
28 | } | 28 | } |
29 | 29 | ||
30 | pub fn apply(&self, mut text: String) -> String { | 30 | pub fn apply(&self, mut text: String) -> String { |
31 | let start = self.delete.start().to_usize(); | 31 | let start: usize = self.delete.start().into(); |
32 | let end = self.delete.end().to_usize(); | 32 | let end: usize = self.delete.end().into(); |
33 | text.replace_range(start..end, &self.insert); | 33 | text.replace_range(start..end, &self.insert); |
34 | text | 34 | text |
35 | } | 35 | } |
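A sketch of the to_usize() replacement used in AtomTextEdit::apply above: offsets now cross into usize via the std conversion traits. The sample text and range are invented.

use text_size::{TextRange, TextSize};

fn main() {
    let mut text = String::from("hello world");
    let delete = TextRange::new(TextSize::from_usize(5), TextSize::from_usize(11));

    // Old: delete.start().to_usize() / delete.end().to_usize()
    // New: explicit conversions through `Into<usize>`.
    let start: usize = delete.start().into();
    let end: usize = delete.end().into();
    text.replace_range(start..end, "!");
    assert_eq!(text, "hello!");
}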
diff --git a/crates/ra_text_edit/src/text_edit.rs b/crates/ra_text_edit/src/text_edit.rs index 5c37a08a8..db69a7e7b 100644 --- a/crates/ra_text_edit/src/text_edit.rs +++ b/crates/ra_text_edit/src/text_edit.rs | |||
@@ -1,7 +1,8 @@ | |||
1 | //! FIXME: write short doc here | 1 | //! FIXME: write short doc here |
2 | 2 | ||
3 | use crate::AtomTextEdit; | 3 | use crate::AtomTextEdit; |
4 | use text_unit::{TextRange, TextUnit}; | 4 | // TODO: fix Cargo.toml |
5 | use text_size::{TextRange, TextSize}; | ||
5 | 6 | ||
6 | #[derive(Debug, Clone)] | 7 | #[derive(Debug, Clone)] |
7 | pub struct TextEdit { | 8 | pub struct TextEdit { |
@@ -20,19 +21,19 @@ impl TextEditBuilder { | |||
20 | pub fn delete(&mut self, range: TextRange) { | 21 | pub fn delete(&mut self, range: TextRange) { |
21 | self.atoms.push(AtomTextEdit::delete(range)) | 22 | self.atoms.push(AtomTextEdit::delete(range)) |
22 | } | 23 | } |
23 | pub fn insert(&mut self, offset: TextUnit, text: String) { | 24 | pub fn insert(&mut self, offset: TextSize, text: String) { |
24 | self.atoms.push(AtomTextEdit::insert(offset, text)) | 25 | self.atoms.push(AtomTextEdit::insert(offset, text)) |
25 | } | 26 | } |
26 | pub fn finish(self) -> TextEdit { | 27 | pub fn finish(self) -> TextEdit { |
27 | TextEdit::from_atoms(self.atoms) | 28 | TextEdit::from_atoms(self.atoms) |
28 | } | 29 | } |
29 | pub fn invalidates_offset(&self, offset: TextUnit) -> bool { | 30 | pub fn invalidates_offset(&self, offset: TextSize) -> bool { |
30 | self.atoms.iter().any(|atom| atom.delete.contains_inclusive(offset)) | 31 | self.atoms.iter().any(|atom| atom.delete.contains_inclusive(offset)) |
31 | } | 32 | } |
32 | } | 33 | } |
33 | 34 | ||
34 | impl TextEdit { | 35 | impl TextEdit { |
35 | pub fn insert(offset: TextUnit, text: String) -> TextEdit { | 36 | pub fn insert(offset: TextSize, text: String) -> TextEdit { |
36 | let mut builder = TextEditBuilder::default(); | 37 | let mut builder = TextEditBuilder::default(); |
37 | builder.insert(offset, text); | 38 | builder.insert(offset, text); |
38 | builder.finish() | 39 | builder.finish() |
@@ -63,16 +64,16 @@ impl TextEdit { | |||
63 | } | 64 | } |
64 | 65 | ||
65 | pub fn apply(&self, text: &str) -> String { | 66 | pub fn apply(&self, text: &str) -> String { |
66 | let mut total_len = TextUnit::of_str(text); | 67 | let mut total_len = TextSize::of(text); |
67 | for atom in self.atoms.iter() { | 68 | for atom in self.atoms.iter() { |
68 | total_len += TextUnit::of_str(&atom.insert); | 69 | total_len += TextSize::of(&atom.insert); |
69 | total_len -= atom.delete.end() - atom.delete.start(); | 70 | total_len -= atom.delete.end() - atom.delete.start(); |
70 | } | 71 | } |
71 | let mut buf = String::with_capacity(total_len.to_usize()); | 72 | let mut buf = String::with_capacity(total_len.into()); |
72 | let mut prev = 0; | 73 | let mut prev = 0; |
73 | for atom in self.atoms.iter() { | 74 | for atom in self.atoms.iter() { |
74 | let start = atom.delete.start().to_usize(); | 75 | let start: usize = atom.delete.start().into(); |
75 | let end = atom.delete.end().to_usize(); | 76 | let end: usize = atom.delete.end().into(); |
76 | if start > prev { | 77 | if start > prev { |
77 | buf.push_str(&text[prev..start]); | 78 | buf.push_str(&text[prev..start]); |
78 | } | 79 | } |
@@ -80,11 +81,11 @@ impl TextEdit { | |||
80 | prev = end; | 81 | prev = end; |
81 | } | 82 | } |
82 | buf.push_str(&text[prev..text.len()]); | 83 | buf.push_str(&text[prev..text.len()]); |
83 | assert_eq!(TextUnit::of_str(&buf), total_len); | 84 | assert_eq!(TextSize::of(&buf), total_len); |
84 | buf | 85 | buf |
85 | } | 86 | } |
86 | 87 | ||
87 | pub fn apply_to_offset(&self, offset: TextUnit) -> Option<TextUnit> { | 88 | pub fn apply_to_offset(&self, offset: TextSize) -> Option<TextSize> { |
88 | let mut res = offset; | 89 | let mut res = offset; |
89 | for atom in self.atoms.iter() { | 90 | for atom in self.atoms.iter() { |
90 | if atom.delete.start() >= offset { | 91 | if atom.delete.start() >= offset { |
@@ -93,7 +94,7 @@ impl TextEdit { | |||
93 | if offset < atom.delete.end() { | 94 | if offset < atom.delete.end() { |
94 | return None; | 95 | return None; |
95 | } | 96 | } |
96 | res += TextUnit::of_str(&atom.insert); | 97 | res += TextSize::of(&atom.insert); |
97 | res -= atom.delete.len(); | 98 | res -= atom.delete.len(); |
98 | } | 99 | } |
99 | Some(res) | 100 | Some(res) |
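The length bookkeeping in TextEdit::apply above, reduced to a single hypothetical replacement; it assumes only the TextSize arithmetic and usize conversions shown in the hunk.

use text_size::{TextRange, TextSize};

fn main() {
    let text = "let x = 1;";
    // One replacement: delete the `1` and insert `92`.
    let delete = TextRange::new(TextSize::from_usize(8), TextSize::from_usize(9));
    let insert = "92";

    // Grow by the inserted text, shrink by the deleted range.
    let mut total_len = TextSize::of(text);
    total_len += TextSize::of(insert);
    total_len -= delete.len();

    let mut buf = String::with_capacity(total_len.into());
    buf.push_str(&text[..usize::from(delete.start())]);
    buf.push_str(insert);
    buf.push_str(&text[usize::from(delete.end())..]);

    assert_eq!(buf, "let x = 92;");
    assert_eq!(TextSize::of(buf.as_str()), total_len);
}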
diff --git a/crates/rust-analyzer/src/cli/analysis_stats.rs b/crates/rust-analyzer/src/cli/analysis_stats.rs index 9fa7dad71..72183da15 100644 --- a/crates/rust-analyzer/src/cli/analysis_stats.rs +++ b/crates/rust-analyzer/src/cli/analysis_stats.rs | |||
@@ -130,7 +130,7 @@ pub fn analysis_stats( | |||
130 | let original_file = src.file_id.original_file(db); | 130 | let original_file = src.file_id.original_file(db); |
131 | let path = db.file_relative_path(original_file); | 131 | let path = db.file_relative_path(original_file); |
132 | let syntax_range = src.value.syntax().text_range(); | 132 | let syntax_range = src.value.syntax().text_range(); |
133 | format_to!(msg, " ({:?} {})", path, syntax_range); | 133 | format_to!(msg, " ({:?} {:?})", path, syntax_range); |
134 | } | 134 | } |
135 | if verbosity.is_spammy() { | 135 | if verbosity.is_spammy() { |
136 | bar.println(msg.to_string()); | 136 | bar.println(msg.to_string()); |
diff --git a/crates/rust-analyzer/src/conv.rs b/crates/rust-analyzer/src/conv.rs index 2285cb1d3..b0f911f71 100644 --- a/crates/rust-analyzer/src/conv.rs +++ b/crates/rust-analyzer/src/conv.rs | |||
@@ -14,7 +14,7 @@ use ra_ide::{ | |||
14 | InlayHint, InlayKind, InsertTextFormat, LineCol, LineIndex, NavigationTarget, RangeInfo, | 14 | InlayHint, InlayKind, InsertTextFormat, LineCol, LineIndex, NavigationTarget, RangeInfo, |
15 | ReferenceAccess, Severity, SourceChange, SourceFileEdit, | 15 | ReferenceAccess, Severity, SourceChange, SourceFileEdit, |
16 | }; | 16 | }; |
17 | use ra_syntax::{SyntaxKind, TextRange, TextUnit}; | 17 | use ra_syntax::{SyntaxKind, TextRange, TextSize}; |
18 | use ra_text_edit::{AtomTextEdit, TextEdit}; | 18 | use ra_text_edit::{AtomTextEdit, TextEdit}; |
19 | use ra_vfs::LineEndings; | 19 | use ra_vfs::LineEndings; |
20 | 20 | ||
@@ -124,13 +124,13 @@ impl ConvWith<(&LineIndex, LineEndings)> for CompletionItem { | |||
124 | // LSP does not allow arbitrary edits in completion, so we have to do a | 124 | // LSP does not allow arbitrary edits in completion, so we have to do a |
125 | // non-trivial mapping here. | 125 | // non-trivial mapping here. |
126 | for atom_edit in self.text_edit().as_atoms() { | 126 | for atom_edit in self.text_edit().as_atoms() { |
127 | if self.source_range().is_subrange(&atom_edit.delete) { | 127 | if atom_edit.delete.contains_range(self.source_range()) { |
128 | text_edit = Some(if atom_edit.delete == self.source_range() { | 128 | text_edit = Some(if atom_edit.delete == self.source_range() { |
129 | atom_edit.conv_with((ctx.0, ctx.1)) | 129 | atom_edit.conv_with((ctx.0, ctx.1)) |
130 | } else { | 130 | } else { |
131 | assert!(self.source_range().end() == atom_edit.delete.end()); | 131 | assert!(self.source_range().end() == atom_edit.delete.end()); |
132 | let range1 = | 132 | let range1 = |
133 | TextRange::from_to(atom_edit.delete.start(), self.source_range().start()); | 133 | TextRange::new(atom_edit.delete.start(), self.source_range().start()); |
134 | let range2 = self.source_range(); | 134 | let range2 = self.source_range(); |
135 | let edit1 = AtomTextEdit::replace(range1, String::new()); | 135 | let edit1 = AtomTextEdit::replace(range1, String::new()); |
136 | let edit2 = AtomTextEdit::replace(range2, atom_edit.insert.clone()); | 136 | let edit2 = AtomTextEdit::replace(range2, atom_edit.insert.clone()); |
@@ -138,7 +138,7 @@ impl ConvWith<(&LineIndex, LineEndings)> for CompletionItem { | |||
138 | edit2.conv_with((ctx.0, ctx.1)) | 138 | edit2.conv_with((ctx.0, ctx.1)) |
139 | }) | 139 | }) |
140 | } else { | 140 | } else { |
141 | assert!(self.source_range().intersection(&atom_edit.delete).is_none()); | 141 | assert!(self.source_range().intersect(atom_edit.delete).is_none()); |
142 | additional_text_edits.push(atom_edit.conv_with((ctx.0, ctx.1))); | 142 | additional_text_edits.push(atom_edit.conv_with((ctx.0, ctx.1))); |
143 | } | 143 | } |
144 | } | 144 | } |
@@ -184,15 +184,15 @@ impl ConvWith<(&LineIndex, LineEndings)> for CompletionItem { | |||
184 | } | 184 | } |
185 | 185 | ||
186 | impl ConvWith<&LineIndex> for Position { | 186 | impl ConvWith<&LineIndex> for Position { |
187 | type Output = TextUnit; | 187 | type Output = TextSize; |
188 | 188 | ||
189 | fn conv_with(self, line_index: &LineIndex) -> TextUnit { | 189 | fn conv_with(self, line_index: &LineIndex) -> TextSize { |
190 | let line_col = LineCol { line: self.line as u32, col_utf16: self.character as u32 }; | 190 | let line_col = LineCol { line: self.line as u32, col_utf16: self.character as u32 }; |
191 | line_index.offset(line_col) | 191 | line_index.offset(line_col) |
192 | } | 192 | } |
193 | } | 193 | } |
194 | 194 | ||
195 | impl ConvWith<&LineIndex> for TextUnit { | 195 | impl ConvWith<&LineIndex> for TextSize { |
196 | type Output = Position; | 196 | type Output = Position; |
197 | 197 | ||
198 | fn conv_with(self, line_index: &LineIndex) -> Position { | 198 | fn conv_with(self, line_index: &LineIndex) -> Position { |
@@ -213,7 +213,7 @@ impl ConvWith<&LineIndex> for Range { | |||
213 | type Output = TextRange; | 213 | type Output = TextRange; |
214 | 214 | ||
215 | fn conv_with(self, line_index: &LineIndex) -> TextRange { | 215 | fn conv_with(self, line_index: &LineIndex) -> TextRange { |
216 | TextRange::from_to(self.start.conv_with(line_index), self.end.conv_with(line_index)) | 216 | TextRange::new(self.start.conv_with(line_index), self.end.conv_with(line_index)) |
217 | } | 217 | } |
218 | } | 218 | } |
219 | 219 | ||
@@ -300,7 +300,7 @@ impl ConvWith<&FoldConvCtx<'_>> for Fold { | |||
300 | // range.end.line from the folding region if there is more text after range.end | 300 | // range.end.line from the folding region if there is more text after range.end |
301 | // on the same line. | 301 | // on the same line. |
302 | let has_more_text_on_end_line = ctx.text | 302 | let has_more_text_on_end_line = ctx.text |
303 | [TextRange::from_to(self.range.end(), TextUnit::of_str(ctx.text))] | 303 | [TextRange::new(self.range.end(), TextSize::of(ctx.text))] |
304 | .chars() | 304 | .chars() |
305 | .take_while(|it| *it != '\n') | 305 | .take_while(|it| *it != '\n') |
306 | .any(|it| !it.is_whitespace()); | 306 | .any(|it| !it.is_whitespace()); |
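A sketch of the operand swap in the completion hunk above: the old a.is_subrange(&b) becomes b.contains_range(a), with the containing range as the receiver; the ranges are invented for illustration.

use text_size::{TextRange, TextSize};

fn main() {
    let source_range = TextRange::new(TextSize::from_usize(4), TextSize::from_usize(8));
    let delete = TextRange::new(TextSize::from_usize(2), TextSize::from_usize(8));

    // Old: source_range.is_subrange(&delete)
    // New: delete.contains_range(source_range), receiver and argument swap sides.
    assert!(delete.contains_range(source_range));

    // The split performed above: the prefix of `delete` before `source_range`
    // becomes its own edit.
    let range1 = TextRange::new(delete.start(), source_range.start());
    assert_eq!(range1.len(), TextSize::from_usize(2));
}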
diff --git a/crates/rust-analyzer/src/main_loop/handlers.rs b/crates/rust-analyzer/src/main_loop/handlers.rs index 41d9fe344..381f37f16 100644 --- a/crates/rust-analyzer/src/main_loop/handlers.rs +++ b/crates/rust-analyzer/src/main_loop/handlers.rs | |||
@@ -23,7 +23,7 @@ use ra_ide::{ | |||
23 | SearchScope, | 23 | SearchScope, |
24 | }; | 24 | }; |
25 | use ra_prof::profile; | 25 | use ra_prof::profile; |
26 | use ra_syntax::{AstNode, SyntaxKind, TextRange, TextUnit}; | 26 | use ra_syntax::{AstNode, SyntaxKind, TextRange, TextSize}; |
27 | use rustc_hash::FxHashMap; | 27 | use rustc_hash::FxHashMap; |
28 | use serde::{Deserialize, Serialize}; | 28 | use serde::{Deserialize, Serialize}; |
29 | use serde_json::to_value; | 29 | use serde_json::to_value; |
@@ -97,7 +97,7 @@ pub fn handle_selection_range( | |||
97 | .map(|position| { | 97 | .map(|position| { |
98 | let mut ranges = Vec::new(); | 98 | let mut ranges = Vec::new(); |
99 | { | 99 | { |
100 | let mut range = TextRange::from_to(position, position); | 100 | let mut range = TextRange::new(position, position); |
101 | loop { | 101 | loop { |
102 | ranges.push(range); | 102 | ranges.push(range); |
103 | let frange = FileRange { file_id, range }; | 103 | let frange = FileRange { file_id, range }; |
@@ -184,11 +184,11 @@ pub fn handle_on_type_formatting( | |||
184 | 184 | ||
185 | // in `ra_ide`, the `on_type` invariant is that | 185 | // in `ra_ide`, the `on_type` invariant is that |
186 | // `text.char_at(position) == typed_char`. | 186 | // `text.char_at(position) == typed_char`. |
187 | position.offset -= TextUnit::of_char('.'); | 187 | position.offset -= TextSize::of('.'); |
188 | let char_typed = params.ch.chars().next().unwrap_or('\0'); | 188 | let char_typed = params.ch.chars().next().unwrap_or('\0'); |
189 | assert!({ | 189 | assert!({ |
190 | let text = world.analysis().file_text(position.file_id)?; | 190 | let text = world.analysis().file_text(position.file_id)?; |
191 | text[position.offset.to_usize()..].starts_with(char_typed) | 191 | text[usize::from(position.offset)..].starts_with(char_typed) |
192 | }); | 192 | }); |
193 | 193 | ||
194 | // We have an assist that inserts ` ` after typing `->` in `fn foo() ->{`, | 194 | // We have an assist that inserts ` ` after typing `->` in `fn foo() ->{`, |
@@ -403,7 +403,7 @@ pub fn handle_completion( | |||
403 | let syntax = source_file.syntax(); | 403 | let syntax = source_file.syntax(); |
404 | let text = syntax.text(); | 404 | let text = syntax.text(); |
405 | if let Some(next_char) = text.char_at(position.offset) { | 405 | if let Some(next_char) = text.char_at(position.offset) { |
406 | let diff = TextUnit::of_char(next_char) + TextUnit::of_char(':'); | 406 | let diff = TextSize::of(next_char) + TextSize::of(':'); |
407 | let prev_char = position.offset - diff; | 407 | let prev_char = position.offset - diff; |
408 | if text.char_at(prev_char) != Some(':') { | 408 | if text.char_at(prev_char) != Some(':') { |
409 | res = true; | 409 | res = true; |
@@ -592,7 +592,7 @@ pub fn handle_formatting( | |||
592 | let crate_ids = world.analysis().crate_for(file_id)?; | 592 | let crate_ids = world.analysis().crate_for(file_id)?; |
593 | 593 | ||
594 | let file_line_index = world.analysis().file_line_index(file_id)?; | 594 | let file_line_index = world.analysis().file_line_index(file_id)?; |
595 | let end_position = TextUnit::of_str(&file).conv_with(&file_line_index); | 595 | let end_position = TextSize::of(&file).conv_with(&file_line_index); |
596 | 596 | ||
597 | let mut rustfmt = match &world.config.rustfmt { | 597 | let mut rustfmt = match &world.config.rustfmt { |
598 | RustfmtConfig::Rustfmt { extra_args } => { | 598 | RustfmtConfig::Rustfmt { extra_args } => { |
@@ -698,7 +698,7 @@ pub fn handle_code_action( | |||
698 | let fixes_from_diagnostics = diagnostics | 698 | let fixes_from_diagnostics = diagnostics |
699 | .into_iter() | 699 | .into_iter() |
700 | .filter_map(|d| Some((d.range, d.fix?))) | 700 | .filter_map(|d| Some((d.range, d.fix?))) |
701 | .filter(|(diag_range, _fix)| diag_range.intersection(&range).is_some()) | 701 | .filter(|(diag_range, _fix)| diag_range.intersect(range).is_some()) |
702 | .map(|(_range, fix)| fix); | 702 | .map(|(_range, fix)| fix); |
703 | 703 | ||
704 | for source_edit in fixes_from_diagnostics { | 704 | for source_edit in fixes_from_diagnostics { |
@@ -723,7 +723,7 @@ pub fn handle_code_action( | |||
723 | 723 | ||
724 | for fix in world.check_fixes.get(&file_id).into_iter().flatten() { | 724 | for fix in world.check_fixes.get(&file_id).into_iter().flatten() { |
725 | let fix_range = fix.range.conv_with(&line_index); | 725 | let fix_range = fix.range.conv_with(&line_index); |
726 | if fix_range.intersection(&range).is_none() { | 726 | if fix_range.intersect(range).is_none() { |
727 | continue; | 727 | continue; |
728 | } | 728 | } |
729 | res.push(fix.action.clone()); | 729 | res.push(fix.action.clone()); |
@@ -1107,7 +1107,7 @@ pub fn handle_semantic_tokens( | |||
1107 | let (token_index, modifier_bitset) = highlight_range.highlight.conv(); | 1107 | let (token_index, modifier_bitset) = highlight_range.highlight.conv(); |
1108 | for mut range in line_index.lines(highlight_range.range) { | 1108 | for mut range in line_index.lines(highlight_range.range) { |
1109 | if text[range].ends_with('\n') { | 1109 | if text[range].ends_with('\n') { |
1110 | range = TextRange::from_to(range.start(), range.end() - TextUnit::of_char('\n')); | 1110 | range = TextRange::new(range.start(), range.end() - TextSize::of('\n')); |
1111 | } | 1111 | } |
1112 | let range = range.conv_with(&line_index); | 1112 | let range = range.conv_with(&line_index); |
1113 | builder.push(range, token_index, modifier_bitset); | 1113 | builder.push(range, token_index, modifier_bitset); |
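
The handlers.rs hunks are mechanical renames from the retired text_unit API to text-size. A minimal sketch of the old-to-new mapping, using hypothetical values:

    use text_size::{TextRange, TextSize};

    // Each line notes the old text_unit spelling it replaces.
    fn text_size_renames() {
        let offset = TextSize::from(4u32);                  // was TextUnit::from(4)
        let arrow = TextSize::of("->");                     // was TextUnit::of_str("->")
        let newline = TextSize::of('\n');                   // was TextUnit::of_char('\n')
        let range = TextRange::new(offset, offset + arrow); // was TextRange::from_to(start, end)
        let other = TextRange::new(offset, offset + newline);
        let _overlap = range.intersect(other);              // was range.intersection(&other); now by value
        let _index: usize = usize::from(offset);            // was offset.to_usize()
    }
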
diff --git a/crates/test_utils/Cargo.toml b/crates/test_utils/Cargo.toml index 6a7c6d6f9..652ab4537 100644 --- a/crates/test_utils/Cargo.toml +++ b/crates/test_utils/Cargo.toml | |||
@@ -9,5 +9,5 @@ doctest = false | |||
9 | 9 | ||
10 | [dependencies] | 10 | [dependencies] |
11 | difference = "2.0.0" | 11 | difference = "2.0.0" |
12 | text_unit = "0.1.10" | 12 | text-size = { path = "../../../text-size" } |
13 | serde_json = "1.0.48" | 13 | serde_json = "1.0.48" |
diff --git a/crates/test_utils/src/lib.rs b/crates/test_utils/src/lib.rs index 4164bfd5e..b1365444a 100644 --- a/crates/test_utils/src/lib.rs +++ b/crates/test_utils/src/lib.rs | |||
@@ -15,7 +15,7 @@ use std::{ | |||
15 | }; | 15 | }; |
16 | 16 | ||
17 | use serde_json::Value; | 17 | use serde_json::Value; |
18 | use text_unit::{TextRange, TextUnit}; | 18 | use text_size::{TextRange, TextSize}; |
19 | 19 | ||
20 | pub use difference::Changeset as __Changeset; | 20 | pub use difference::Changeset as __Changeset; |
21 | 21 | ||
@@ -49,7 +49,7 @@ macro_rules! assert_eq_text { | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /// Infallible version of `try_extract_offset()`. | 51 | /// Infallible version of `try_extract_offset()`. |
52 | pub fn extract_offset(text: &str) -> (TextUnit, String) { | 52 | pub fn extract_offset(text: &str) -> (TextSize, String) { |
53 | match try_extract_offset(text) { | 53 | match try_extract_offset(text) { |
54 | None => panic!("text should contain cursor marker"), | 54 | None => panic!("text should contain cursor marker"), |
55 | Some(result) => result, | 55 | Some(result) => result, |
@@ -58,12 +58,12 @@ pub fn extract_offset(text: &str) -> (TextUnit, String) { | |||
58 | 58 | ||
59 | /// Returns the offset of the first occurrence of `<|>` marker and the copy of `text` | 59 | /// Returns the offset of the first occurrence of `<|>` marker and the copy of `text` |
60 | /// without the marker. | 60 | /// without the marker. |
61 | fn try_extract_offset(text: &str) -> Option<(TextUnit, String)> { | 61 | fn try_extract_offset(text: &str) -> Option<(TextSize, String)> { |
62 | let cursor_pos = text.find(CURSOR_MARKER)?; | 62 | let cursor_pos = text.find(CURSOR_MARKER)?; |
63 | let mut new_text = String::with_capacity(text.len() - CURSOR_MARKER.len()); | 63 | let mut new_text = String::with_capacity(text.len() - CURSOR_MARKER.len()); |
64 | new_text.push_str(&text[..cursor_pos]); | 64 | new_text.push_str(&text[..cursor_pos]); |
65 | new_text.push_str(&text[cursor_pos + CURSOR_MARKER.len()..]); | 65 | new_text.push_str(&text[cursor_pos + CURSOR_MARKER.len()..]); |
66 | let cursor_pos = TextUnit::from(cursor_pos as u32); | 66 | let cursor_pos = TextSize::from(cursor_pos as u32); |
67 | Some((cursor_pos, new_text)) | 67 | Some((cursor_pos, new_text)) |
68 | } | 68 | } |
69 | 69 | ||
@@ -80,25 +80,25 @@ pub fn extract_range(text: &str) -> (TextRange, String) { | |||
80 | fn try_extract_range(text: &str) -> Option<(TextRange, String)> { | 80 | fn try_extract_range(text: &str) -> Option<(TextRange, String)> { |
81 | let (start, text) = try_extract_offset(text)?; | 81 | let (start, text) = try_extract_offset(text)?; |
82 | let (end, text) = try_extract_offset(&text)?; | 82 | let (end, text) = try_extract_offset(&text)?; |
83 | Some((TextRange::from_to(start, end), text)) | 83 | Some((TextRange::new(start, end), text)) |
84 | } | 84 | } |
85 | 85 | ||
86 | #[derive(Clone, Copy)] | 86 | #[derive(Clone, Copy)] |
87 | pub enum RangeOrOffset { | 87 | pub enum RangeOrOffset { |
88 | Range(TextRange), | 88 | Range(TextRange), |
89 | Offset(TextUnit), | 89 | Offset(TextSize), |
90 | } | 90 | } |
91 | 91 | ||
92 | impl From<RangeOrOffset> for TextRange { | 92 | impl From<RangeOrOffset> for TextRange { |
93 | fn from(selection: RangeOrOffset) -> Self { | 93 | fn from(selection: RangeOrOffset) -> Self { |
94 | match selection { | 94 | match selection { |
95 | RangeOrOffset::Range(it) => it, | 95 | RangeOrOffset::Range(it) => it, |
96 | RangeOrOffset::Offset(it) => TextRange::from_to(it, it), | 96 | RangeOrOffset::Offset(it) => TextRange::new(it, it), |
97 | } | 97 | } |
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | /// Extracts `TextRange` or `TextUnit` depending on the amount of `<|>` markers | 101 | /// Extracts `TextRange` or `TextSize` depending on the amount of `<|>` markers |
102 | /// found in `text`. | 102 | /// found in `text`. |
103 | /// | 103 | /// |
104 | /// # Panics | 104 | /// # Panics |
@@ -129,13 +129,13 @@ pub fn extract_ranges(mut text: &str, tag: &str) -> (Vec<TextRange>, String) { | |||
129 | text = &text[i..]; | 129 | text = &text[i..]; |
130 | if text.starts_with(&open) { | 130 | if text.starts_with(&open) { |
131 | text = &text[open.len()..]; | 131 | text = &text[open.len()..]; |
132 | let from = TextUnit::of_str(&res); | 132 | let from = TextSize::of(&res); |
133 | stack.push(from); | 133 | stack.push(from); |
134 | } else if text.starts_with(&close) { | 134 | } else if text.starts_with(&close) { |
135 | text = &text[close.len()..]; | 135 | text = &text[close.len()..]; |
136 | let from = stack.pop().unwrap_or_else(|| panic!("unmatched </{}>", tag)); | 136 | let from = stack.pop().unwrap_or_else(|| panic!("unmatched </{}>", tag)); |
137 | let to = TextUnit::of_str(&res); | 137 | let to = TextSize::of(&res); |
138 | ranges.push(TextRange::from_to(from, to)); | 138 | ranges.push(TextRange::new(from, to)); |
139 | } | 139 | } |
140 | } | 140 | } |
141 | } | 141 | } |
@@ -146,8 +146,8 @@ pub fn extract_ranges(mut text: &str, tag: &str) -> (Vec<TextRange>, String) { | |||
146 | } | 146 | } |
147 | 147 | ||
148 | /// Inserts `<|>` marker into the `text` at `offset`. | 148 | /// Inserts `<|>` marker into the `text` at `offset`. |
149 | pub fn add_cursor(text: &str, offset: TextUnit) -> String { | 149 | pub fn add_cursor(text: &str, offset: TextSize) -> String { |
150 | let offset: usize = offset.to_usize(); | 150 | let offset: usize = offset.into(); |
151 | let mut res = String::new(); | 151 | let mut res = String::new(); |
152 | res.push_str(&text[..offset]); | 152 | res.push_str(&text[..offset]); |
153 | res.push_str("<|>"); | 153 | res.push_str("<|>"); |
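
A usage sketch for the `<|>` marker helpers above; the sample snippets and expected offsets are illustrative:

    use test_utils::{add_cursor, extract_offset, extract_range};

    fn marker_helpers_round_trip() {
        // A single marker yields its TextSize offset plus the text with the marker stripped.
        let (offset, text) = extract_offset("fn main() { let x = 1<|>; }");
        assert_eq!(usize::from(offset), 21);
        assert_eq!(text, "fn main() { let x = 1; }");
        // add_cursor re-inserts the marker at that offset.
        assert_eq!(add_cursor(&text, offset), "fn main() { let x = 1<|>; }");
        // Two markers yield a TextRange built with TextRange::new.
        let (range, text) = extract_range("fn <|>main<|>() {}");
        assert_eq!((u32::from(range.start()), u32::from(range.end())), (3, 7));
        assert_eq!(text, "fn main() {}");
    }
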