Diffstat (limited to 'crates/ra_ide')
-rw-r--r--  crates/ra_ide/src/extend_selection.rs | 91
1 file changed, 47 insertions(+), 44 deletions(-)
diff --git a/crates/ra_ide/src/extend_selection.rs b/crates/ra_ide/src/extend_selection.rs
index 8048c7be9..9b6bbe82d 100644
--- a/crates/ra_ide/src/extend_selection.rs
+++ b/crates/ra_ide/src/extend_selection.rs
@@ -11,7 +11,7 @@ use ra_syntax::{
 
 use crate::{db::RootDatabase, expand::descend_into_macros, FileId, FileRange};
 use hir::db::AstDatabase;
-use itertools::Itertools;
+use std::iter::successors;
 
 pub(crate) fn extend_selection(db: &RootDatabase, frange: FileRange) -> TextRange {
     let src = db.parse(frange.file_id).tree();
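
Note: the only change in this hunk is the import swap. The sorted_by call
from itertools goes away below, and std::iter::successors takes over the
token walking. As a quick standalone sketch of how successors behaves
(illustrative code, not from this file):

    use std::iter::successors;

    fn main() {
        // successors(first, f) yields first, then f(prev), until f returns None.
        // Here: repeatedly halve a number until it would reach zero.
        let halvings: Vec<u32> = successors(Some(20u32), |&n| {
            let half = n / 2;
            if half > 0 { Some(half) } else { None }
        })
        .collect();
        assert_eq!(halvings, [20, 10, 5, 2, 1]);

        // .last() returns the final element the walk reached, the same
        // pattern the new code uses to find the outermost valid token.
        assert_eq!(halvings.last(), Some(&1));
    }
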
@@ -110,46 +110,28 @@ fn extend_tokens_from_range(
     macro_call: ast::MacroCall,
     original_range: TextRange,
 ) -> Option<TextRange> {
-    // Find all non-whitespace tokens under MacroCall
-    let all_tokens: Vec<_> = macro_call
-        .syntax()
-        .descendants_with_tokens()
-        .filter_map(|n| {
-            let token = n.as_token()?;
-            if token.kind() == WHITESPACE {
-                None
-            } else {
-                Some(token.clone())
-            }
-        })
-        .sorted_by(|a, b| Ord::cmp(&a.text_range().start(), &b.text_range().start()))
-        .collect();
-
-    // Get all indices which is in original range
-    let indices: Vec<_> =
-        all_tokens
-            .iter()
-            .enumerate()
-            .filter_map(|(i, token)| {
-                if token.text_range().is_subrange(&original_range) {
-                    Some(i)
-                } else {
-                    None
-                }
-            })
-            .collect();
+    let src = find_covering_element(&macro_call.syntax(), original_range);
+    let (first_token, last_token) = match src {
+        NodeOrToken::Node(it) => (it.first_token()?, it.last_token()?),
+        NodeOrToken::Token(it) => (it.clone(), it),
+    };
+
+    let mut first_token = skip_whitespace(first_token, Direction::Next)?;
+    let mut last_token = skip_whitespace(last_token, Direction::Prev)?;
 
-    // The first and last token index in original_range
-    // Note that the indices is sorted
-    let first_idx = *indices.first()?;
-    let last_idx = *indices.last()?;
+    while !first_token.text_range().is_subrange(&original_range) {
+        first_token = skip_whitespace(first_token.next_token()?, Direction::Next)?;
+    }
+    while !last_token.text_range().is_subrange(&original_range) {
+        last_token = skip_whitespace(last_token.prev_token()?, Direction::Prev)?;
+    }
 
     // compute original mapped token range
     let expanded = {
-        let first_node = descend_into_macros(db, file_id, all_tokens[first_idx].clone());
+        let first_node = descend_into_macros(db, file_id, first_token.clone());
         let first_node = first_node.map(|it| it.text_range());
 
-        let last_node = descend_into_macros(db, file_id, all_tokens[last_idx].clone());
+        let last_node = descend_into_macros(db, file_id, last_token.clone());
         if last_node.file_id == file_id.into() || first_node.file_id != last_node.file_id {
             return None;
         }
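
Note: instead of collecting and sorting every non-whitespace token under
the macro call, the new code takes the element covering original_range,
starts from its first and last tokens, and nudges both inward past
whitespace until they sit fully inside the range. A self-contained model
of that inward walk, with a plain Vec standing in for the rowan token
tree (Tok and first_inside are illustrative names, not ra_syntax API):

    #[derive(Debug)]
    struct Tok {
        is_ws: bool,
        start: u32,
        end: u32,
    }

    // Walk forward from index i, skipping whitespace, until the token lies
    // fully inside [lo, hi]; this mirrors the first while loop in the hunk.
    fn first_inside(toks: &[Tok], mut i: usize, lo: u32, hi: u32) -> Option<usize> {
        loop {
            while toks.get(i)?.is_ws {
                i += 1;
            }
            let t = toks.get(i)?;
            if lo <= t.start && t.end <= hi {
                return Some(i);
            }
            i += 1;
        }
    }

    fn main() {
        let toks = vec![
            Tok { is_ws: false, start: 0, end: 3 },
            Tok { is_ws: true, start: 3, end: 4 },
            Tok { is_ws: false, start: 4, end: 9 },
        ];
        // A selection covering offsets 4..9 lands on the second identifier.
        assert_eq!(first_inside(&toks, 0, 4, 9), Some(2));
    }
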
@@ -160,20 +142,28 @@ fn extend_tokens_from_range(
     let src = db.parse_or_expand(expanded.file_id)?;
     let parent = shallowest_node(&find_covering_element(&src, expanded.value))?.parent()?;
 
-    let validate = |&idx: &usize| {
-        let token: &SyntaxToken = &all_tokens[idx];
+    let validate = |token: SyntaxToken| {
         let node = descend_into_macros(db, file_id, token.clone());
-
-        node.file_id == expanded.file_id
+        if node.file_id == expanded.file_id
             && node.value.text_range().is_subrange(&parent.text_range())
+        {
+            Some(token)
+        } else {
+            None
+        }
     };
 
     // Find the first and last text range under expanded parent
-    let first = (0..=first_idx).rev().take_while(validate).last()?;
-    let last = (last_idx..all_tokens.len()).take_while(validate).last()?;
-
-    let range = union_range(all_tokens[first].text_range(), all_tokens[last].text_range());
-
+    let first = successors(Some(first_token), |token| {
+        validate(skip_whitespace(token.prev_token()?, Direction::Prev)?)
+    })
+    .last()?;
+    let last = successors(Some(last_token), |token| {
+        validate(skip_whitespace(token.next_token()?, Direction::Next)?)
+    })
+    .last()?;
+
+    let range = union_range(first.text_range(), last.text_range());
     if original_range.is_subrange(&range) && original_range != range {
         Some(range)
     } else {
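
Note: the extension outward is the mirror image of the inward walk.
Starting from a token already known to be inside the selection,
successors keeps stepping to the previous (or next) non-whitespace token
for as long as validate maps it into the same expanded file and inside
the expanded parent; .last() is then the outermost token that still
qualified. The same grow-while-valid shape, reduced to a slice (grow_left
is a made-up name for this sketch):

    use std::iter::successors;

    // Extend an index leftward over items while ok() keeps holding, and
    // return the outermost index reached, like the `first` walk above.
    fn grow_left<T>(items: &[T], start: usize, ok: impl Fn(&T) -> bool) -> usize {
        successors(Some(start), |&i| {
            let j = i.checked_sub(1)?;
            if ok(&items[j]) { Some(j) } else { None }
        })
        .last()
        .unwrap() // safe: the iterator always yields `start` itself
    }

    fn main() {
        let depths = [3, 1, 1, 1, 9];
        // From index 3, extend left over every element equal to 1.
        assert_eq!(grow_left(&depths, 3, |&d| d == 1), 1);
    }
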
@@ -181,6 +171,19 @@ fn extend_tokens_from_range(
     }
 }
 
+fn skip_whitespace(
+    mut token: SyntaxToken,
+    direction: Direction,
+) -> Option<SyntaxToken> {
+    while token.kind() == WHITESPACE {
+        token = match direction {
+            Direction::Next => token.next_token()?,
+            Direction::Prev => token.prev_token()?,
+        }
+    }
+    Some(token)
+}
+
 fn union_range(range: TextRange, r: TextRange) -> TextRange {
     let start = range.start().min(r.start());
     let end = range.end().max(r.end());
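
Note: union_range is simply the smallest range covering both inputs,
which becomes the extended selection. Modeling TextRange as a half-open
(start, end) pair of offsets (a stand-in for illustration, not the
ra_syntax type), the min/max logic checks out:

    // Stand-in for TextRange: half-open [start, end) offsets.
    fn union_range(a: (u32, u32), b: (u32, u32)) -> (u32, u32) {
        (a.0.min(b.0), a.1.max(b.1))
    }

    fn main() {
        // Tokens at 4..9 and 12..15 union to the covering span 4..15.
        assert_eq!(union_range((4, 9), (12, 15)), (4, 15));
    }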