author     Edwin Cheng <[email protected]>  2020-03-20 19:04:11 +0000
committer  Edwin Cheng <[email protected]>  2020-03-20 19:08:56 +0000
commit     622c843a4c598befaf459e64e9f75f31b4886a5b (patch)
tree       6a46953e52aeb02f724fd87afe9fab2cec1c7691 /crates/ra_mbe/src
parent     27c516970bea6bee9d6bca52ac9ad619412809ef (diff)
Add TokenConvertor trait
Diffstat (limited to 'crates/ra_mbe/src')
-rw-r--r--  crates/ra_mbe/src/syntax_bridge.rs | 388
-rw-r--r--  crates/ra_mbe/src/tests.rs         |   6
2 files changed, 155 insertions, 239 deletions
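
For orientation: the two conversion paths (RawConvertor over raw lexer tokens, Convertor over syntax trees) previously duplicated the conversion loop; this commit moves the loop into a shared TokenConvertor trait. Condensed from the diff below — the signatures are the commit's, but bodies and surrounding types are elided, so this sketch does not compile on its own:

    // A source token exposes just what the shared loop needs...
    trait SrcToken {
        fn kind(&self) -> SyntaxKind;
        fn to_char(&self) -> Option<char>;
        fn to_text(&self) -> SmolStr;
    }

    // ...and a convertor supplies token access; the default methods
    // `go` and `collect_leaf` (elided) hold the shared conversion loop.
    trait TokenConvertor {
        type Token: SrcToken;

        fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>>;
        fn bump(&mut self) -> Option<(Self::Token, TextRange)>;
        fn peek(&self) -> Option<Self::Token>;
        fn id_alloc(&mut self) -> &mut TokenIdAlloc;
    }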
diff --git a/crates/ra_mbe/src/syntax_bridge.rs b/crates/ra_mbe/src/syntax_bridge.rs
index 0678c37ee..540afc87c 100644
--- a/crates/ra_mbe/src/syntax_bridge.rs
+++ b/crates/ra_mbe/src/syntax_bridge.rs
@@ -3,12 +3,11 @@
 use ra_parser::{FragmentKind, ParseError, TreeSink};
 use ra_syntax::{
     ast::{self, make::tokens::doc_comment},
-    tokenize, AstToken, NodeOrToken, Parse, SmolStr, SyntaxKind,
+    tokenize, AstToken, Parse, SmolStr, SyntaxKind,
     SyntaxKind::*,
-    SyntaxNode, SyntaxTreeBuilder, TextRange, TextUnit, Token, T,
+    SyntaxNode, SyntaxToken, SyntaxTreeBuilder, TextRange, TextUnit, Token as RawToken, T,
 };
 use rustc_hash::FxHashMap;
-use std::iter::successors;
 use tt::buffer::{Cursor, TokenBuffer};

 use crate::subtree_source::SubtreeTokenSource;
@@ -50,10 +49,8 @@ pub fn ast_to_token_tree(ast: &impl ast::AstNode) -> Option<(tt::Subtree, TokenM
 /// will consume).
 pub fn syntax_node_to_token_tree(node: &SyntaxNode) -> Option<(tt::Subtree, TokenMap)> {
     let global_offset = node.text_range().start();
-    let mut c = Convertor {
-        id_alloc: { TokenIdAlloc { map: TokenMap::default(), global_offset, next_id: 0 } },
-    };
-    let subtree = c.go(node)?;
+    let mut c = Convertor::new(node, global_offset);
+    let subtree = c.go()?;
     Some((subtree, c.id_alloc.map))
 }

@@ -237,16 +234,6 @@ impl TokenIdAlloc {
         token_id
     }

-    fn delim(&mut self, open_abs_range: TextRange, close_abs_range: TextRange) -> tt::TokenId {
-        let open_relative_range = open_abs_range - self.global_offset;
-        let close_relative_range = close_abs_range - self.global_offset;
-        let token_id = tt::TokenId(self.next_id);
-        self.next_id += 1;
-
-        self.map.insert_delim(token_id, open_relative_range, close_relative_range);
-        token_id
-    }
-
     fn open_delim(&mut self, open_abs_range: TextRange) -> tt::TokenId {
         let token_id = tt::TokenId(self.next_id);
         self.next_id += 1;
@@ -264,15 +251,19 @@ struct RawConvertor<'a> {
     text: &'a str,
     offset: TextUnit,
     id_alloc: TokenIdAlloc,
-    inner: std::slice::Iter<'a, Token>,
+    inner: std::slice::Iter<'a, RawToken>,
 }

 trait SrcToken {
-    fn kind() -> SyntaxKind;
+    fn kind(&self) -> SyntaxKind;
+
+    fn to_char(&self) -> Option<char>;
+
+    fn to_text(&self) -> SmolStr;
 }

 trait TokenConvertor {
-    type Token : SrcToken;
+    type Token: SrcToken;

     fn go(&mut self) -> Option<tt::Subtree> {
         let mut subtree = tt::Subtree::default();
@@ -291,10 +282,6 @@ trait TokenConvertor {
         Some(subtree)
     }

-    fn bump(&mut self) -> Option<(Self::Token, TextRange)>;
-
-    fn peek(&self) -> Option<Self::Token>;
-
     fn collect_leaf(&mut self, result: &mut Vec<tt::TokenTree>) {
         let (token, range) = match self.bump() {
             None => return,
@@ -303,8 +290,7 @@

         let k: SyntaxKind = token.kind();
         if k == COMMENT {
-            let node = doc_comment(&self.text[range]);
-            if let Some(tokens) = convert_doc_comment(&node) {
+            if let Some(tokens) = self.convert_doc_comment(&token) {
                 result.extend(tokens);
             }
             return;
@@ -320,40 +306,39 @@

         if let Some((kind, closed)) = delim {
             let mut subtree = tt::Subtree::default();
-            let id = self.id_alloc.open_delim(range);
+            let id = self.id_alloc().open_delim(range);
             subtree.delimiter = Some(tt::Delimiter { kind, id });

-            while self.peek().map(|it| it.kind != closed).unwrap_or(false) {
+            while self.peek().map(|it| it.kind() != closed).unwrap_or(false) {
                 self.collect_leaf(&mut subtree.token_trees);
             }
             let last_range = match self.bump() {
                 None => return,
                 Some(it) => it.1,
             };
-            self.id_alloc.close_delim(id, last_range);
+            self.id_alloc().close_delim(id, last_range);
             subtree.into()
         } else {
             let spacing = match self.peek() {
                 Some(next)
-                    if next.kind.is_trivia()
-                        || next.kind == T!['[']
-                        || next.kind == T!['{']
-                        || next.kind == T!['('] =>
+                    if next.kind().is_trivia()
+                        || next.kind() == T!['[']
+                        || next.kind() == T!['{']
+                        || next.kind() == T!['('] =>
                 {
                     tt::Spacing::Alone
                 }
-                Some(next) if next.kind.is_punct() => tt::Spacing::Joint,
+                Some(next) if next.kind().is_punct() => tt::Spacing::Joint,
                 _ => tt::Spacing::Alone,
             };
-            let char =
-                self.text[range].chars().next().expect("Token from lexer must be single char");
+            let char = token.to_char().expect("Token from lexer must be single char");

-            tt::Leaf::from(tt::Punct { char, spacing, id: self.id_alloc.alloc(range) }).into()
+            tt::Leaf::from(tt::Punct { char, spacing, id: self.id_alloc().alloc(range) }).into()
         }
     } else {
         macro_rules! make_leaf {
             ($i:ident) => {
-                tt::$i { id: self.id_alloc.alloc(range), text: self.text[range].into() }.into()
+                tt::$i { id: self.id_alloc().alloc(range), text: token.to_text() }.into()
             };
         }
         let leaf: tt::Leaf = match k {
@@ -367,237 +352,168 @@ trait TokenConvertor {
             leaf.into()
         });
     }
+
+    fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>>;
+
+    fn bump(&mut self) -> Option<(Self::Token, TextRange)>;
+
+    fn peek(&self) -> Option<Self::Token>;
+
+    fn id_alloc(&mut self) -> &mut TokenIdAlloc;
 }

-impl RawConvertor<'_> {
-    fn go(&mut self) -> Option<tt::Subtree> {
-        let mut subtree = tt::Subtree::default();
-        subtree.delimiter = None;
-        while self.peek().is_some() {
-            self.collect_leaf(&mut subtree.token_trees);
-        }
-        if subtree.token_trees.is_empty() {
-            return None;
-        }
-        if subtree.token_trees.len() == 1 {
-            if let tt::TokenTree::Subtree(first) = &subtree.token_trees[0] {
-                return Some(first.clone());
-            }
-        }
-        Some(subtree)
-    }
-
-    fn bump(&mut self) -> Option<(Token, TextRange)> {
-        let token = self.inner.next()?;
-        let range = TextRange::offset_len(self.offset, token.len);
-        self.offset += token.len;
-        Some((*token, range))
-    }
-
-    fn peek(&self) -> Option<Token> {
-        self.inner.as_slice().get(0).cloned()
-    }
-
-
-    fn collect_leaf(&mut self, result: &mut Vec<tt::TokenTree>) {
-        let (token, range) = match self.bump() {
-            None => return,
-            Some(it) => it,
-        };
-
-        let k: SyntaxKind = token.kind;
-        if k == COMMENT {
-            let node = doc_comment(&self.text[range]);
-            if let Some(tokens) = convert_doc_comment(&node) {
-                result.extend(tokens);
-            }
-            return;
-        }
-
-        result.push(if k.is_punct() {
-            let delim = match k {
-                T!['('] => Some((tt::DelimiterKind::Parenthesis, T![')'])),
-                T!['{'] => Some((tt::DelimiterKind::Brace, T!['}'])),
-                T!['['] => Some((tt::DelimiterKind::Bracket, T![']'])),
-                _ => None,
-            };
-
-            if let Some((kind, closed)) = delim {
-                let mut subtree = tt::Subtree::default();
-                let id = self.id_alloc.open_delim(range);
-                subtree.delimiter = Some(tt::Delimiter { kind, id });
-
-                while self.peek().map(|it| it.kind != closed).unwrap_or(false) {
-                    self.collect_leaf(&mut subtree.token_trees);
-                }
-                let last_range = match self.bump() {
-                    None => return,
-                    Some(it) => it.1,
-                };
-                self.id_alloc.close_delim(id, last_range);
-                subtree.into()
-            } else {
-                let spacing = match self.peek() {
-                    Some(next)
-                        if next.kind.is_trivia()
-                            || next.kind == T!['[']
-                            || next.kind == T!['{']
-                            || next.kind == T!['('] =>
-                    {
-                        tt::Spacing::Alone
-                    }
-                    Some(next) if next.kind.is_punct() => tt::Spacing::Joint,
-                    _ => tt::Spacing::Alone,
-                };
-                let char =
-                    self.text[range].chars().next().expect("Token from lexer must be single char");
-
-                tt::Leaf::from(tt::Punct { char, spacing, id: self.id_alloc.alloc(range) }).into()
-            }
-        } else {
-            macro_rules! make_leaf {
-                ($i:ident) => {
-                    tt::$i { id: self.id_alloc.alloc(range), text: self.text[range].into() }.into()
-                };
-            }
-            let leaf: tt::Leaf = match k {
-                T![true] | T![false] => make_leaf!(Literal),
-                IDENT | LIFETIME => make_leaf!(Ident),
-                k if k.is_keyword() => make_leaf!(Ident),
-                k if k.is_literal() => make_leaf!(Literal),
-                _ => return,
-            };
-
-            leaf.into()
-        });
-    }
-}
-
-// FIXME: There are some duplicate logic between RawConvertor and Convertor
-// It would be nice to refactor to converting SyntaxNode to ra_parser::Token and thus
-// use RawConvertor directly. But performance-wise it may not be a good idea ?
+impl<'a> SrcToken for (RawToken, &'a str) {
+    fn kind(&self) -> SyntaxKind {
+        self.0.kind
+    }
+
+    fn to_char(&self) -> Option<char> {
+        self.1.chars().next()
+    }
+
+    fn to_text(&self) -> SmolStr {
+        self.1.into()
+    }
+}
+
+impl RawConvertor<'_> {}
+
+impl<'a> TokenConvertor for RawConvertor<'a> {
+    type Token = (RawToken, &'a str);
+
+    fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>> {
+        convert_doc_comment(&doc_comment(token.1))
+    }
+
+    fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
+        let token = self.inner.next()?;
+        let range = TextRange::offset_len(self.offset, token.len);
+        self.offset += token.len;
+
+        Some(((*token, &self.text[range]), range))
+    }
+
+    fn peek(&self) -> Option<Self::Token> {
+        let token = self.inner.as_slice().get(0).cloned();
+
+        token.map(|it| {
+            let range = TextRange::offset_len(self.offset, it.len);
+            (it, &self.text[range])
+        })
+    }
+
+    fn id_alloc(&mut self) -> &mut TokenIdAlloc {
+        &mut self.id_alloc
+    }
+}
+
 struct Convertor {
     id_alloc: TokenIdAlloc,
+    current: Option<SyntaxToken>,
+    range: TextRange,
+    punct_offset: Option<(SyntaxToken, TextUnit)>,
 }

 impl Convertor {
-    fn go(&mut self, tt: &SyntaxNode) -> Option<tt::Subtree> {
-        // This tree is empty
-        if tt.first_child_or_token().is_none() {
-            return Some(tt::Subtree { token_trees: vec![], delimiter: None });
-        }
-
-        let first_child = tt.first_child_or_token()?;
-        let last_child = tt.last_child_or_token()?;
-
-        // ignore trivial first_child and last_child
-        let first_child = successors(Some(first_child), |it| {
-            if it.kind().is_trivia() {
-                it.next_sibling_or_token()
-            } else {
-                None
-            }
-        })
-        .last()
-        .unwrap();
-        if first_child.kind().is_trivia() {
-            return Some(tt::Subtree { token_trees: vec![], delimiter: None });
-        }
-
-        let last_child = successors(Some(last_child), |it| {
-            if it.kind().is_trivia() {
-                it.prev_sibling_or_token()
-            } else {
-                None
-            }
-        })
-        .last()
-        .unwrap();
-
-        let (delimiter_kind, skip_first) = match (first_child.kind(), last_child.kind()) {
-            (T!['('], T![')']) => (Some(tt::DelimiterKind::Parenthesis), true),
-            (T!['{'], T!['}']) => (Some(tt::DelimiterKind::Brace), true),
-            (T!['['], T![']']) => (Some(tt::DelimiterKind::Bracket), true),
-            _ => (None, false),
-        };
-        let delimiter = delimiter_kind.map(|kind| tt::Delimiter {
-            kind,
-            id: self.id_alloc.delim(first_child.text_range(), last_child.text_range()),
-        });
-
-        let mut token_trees = Vec::new();
-        let mut child_iter = tt.children_with_tokens().skip(skip_first as usize).peekable();
-
-        while let Some(child) = child_iter.next() {
-            if skip_first && (child == first_child || child == last_child) {
-                continue;
-            }
-
-            match child {
-                NodeOrToken::Token(token) => {
-                    if let Some(doc_tokens) = convert_doc_comment(&token) {
-                        token_trees.extend(doc_tokens);
-                    } else if token.kind().is_trivia() {
-                        continue;
-                    } else if token.kind().is_punct() {
-                        // we need to pull apart joined punctuation tokens
-                        let last_spacing = match child_iter.peek() {
-                            Some(NodeOrToken::Token(token)) => {
-                                if token.kind().is_punct() {
-                                    tt::Spacing::Joint
-                                } else {
-                                    tt::Spacing::Alone
-                                }
-                            }
-                            _ => tt::Spacing::Alone,
-                        };
-                        let spacing_iter = std::iter::repeat(tt::Spacing::Joint)
-                            .take(token.text().len() - 1)
-                            .chain(std::iter::once(last_spacing));
-                        for (char, spacing) in token.text().chars().zip(spacing_iter) {
-                            token_trees.push(
-                                tt::Leaf::from(tt::Punct {
-                                    char,
-                                    spacing,
-                                    id: self.id_alloc.alloc(token.text_range()),
-                                })
-                                .into(),
-                            );
-                        }
-                    } else {
-                        macro_rules! make_leaf {
-                            ($i:ident) => {
-                                tt::$i {
-                                    id: self.id_alloc.alloc(token.text_range()),
-                                    text: token.text().clone(),
-                                }
-                                .into()
-                            };
-                        }
-
-                        let child: tt::Leaf = match token.kind() {
-                            T![true] | T![false] => make_leaf!(Literal),
-                            IDENT | LIFETIME => make_leaf!(Ident),
-                            k if k.is_keyword() => make_leaf!(Ident),
-                            k if k.is_literal() => make_leaf!(Literal),
-                            _ => return None,
-                        };
-                        token_trees.push(child.into());
-                    }
-                }
-                NodeOrToken::Node(node) => {
-                    let child_subtree = self.go(&node)?;
-                    if child_subtree.delimiter.is_none() && node.kind() != SyntaxKind::TOKEN_TREE {
-                        token_trees.extend(child_subtree.token_trees);
-                    } else {
-                        token_trees.push(child_subtree.into());
-                    }
-                }
-            };
-        }
-
-        let res = tt::Subtree { delimiter, token_trees };
-        Some(res)
+    fn new(node: &SyntaxNode, global_offset: TextUnit) -> Convertor {
+        Convertor {
+            id_alloc: { TokenIdAlloc { map: TokenMap::default(), global_offset, next_id: 0 } },
+            current: node.first_token(),
+            range: node.text_range(),
+            punct_offset: None,
+        }
+    }
+}
+
+enum SynToken {
+    Ordiniary(SyntaxToken),
+    Punch(SyntaxToken, TextUnit),
+}
+
+impl SynToken {
+    fn token(&self) -> &SyntaxToken {
+        match self {
+            SynToken::Ordiniary(it) => it,
+            SynToken::Punch(it, _) => it,
+        }
+    }
+}
+
+impl SrcToken for SynToken {
+    fn kind(&self) -> SyntaxKind {
+        self.token().kind()
+    }
+    fn to_char(&self) -> Option<char> {
+        match self {
+            SynToken::Ordiniary(_) => None,
+            SynToken::Punch(it, i) => it.text().chars().nth(i.to_usize()),
+        }
+    }
+    fn to_text(&self) -> SmolStr {
+        self.token().text().clone()
+    }
+}
+
+impl TokenConvertor for Convertor {
+    type Token = SynToken;
+    fn convert_doc_comment(&self, token: &Self::Token) -> Option<Vec<tt::TokenTree>> {
+        convert_doc_comment(token.token())
+    }
+
+    fn bump(&mut self) -> Option<(Self::Token, TextRange)> {
+        let curr = self.current.clone()?;
+        if !curr.text_range().is_subrange(&self.range) {
+            return None;
+        }
+
+        if let Some((punct, offset)) = self.punct_offset.clone() {
+            if offset.to_usize() + 1 < punct.text().len() {
+                let offset = offset + TextUnit::from_usize(1);
+                let range = punct.text_range();
+                self.punct_offset = Some((punct, offset));
+                let range = TextRange::offset_len(range.start() + offset, TextUnit::from_usize(1));
+                return Some((SynToken::Punch(curr, offset), range));
+            }
+        }
+
+        self.current = curr.next_token();
+
+        let token = if curr.kind().is_punct() {
+            let range = curr.text_range();
+            self.punct_offset = Some((curr.clone(), TextUnit::from_usize(0)));
+            (SynToken::Punch(curr, TextUnit::from_usize(0)), range)
+        } else {
+            self.punct_offset = None;
+            let range = curr.text_range();
+            (SynToken::Ordiniary(curr), range)
+        };
+
+        Some(token)
+    }
+
+    fn peek(&self) -> Option<Self::Token> {
+        let curr = self.current.clone()?;
+        if !curr.text_range().is_subrange(&self.range) {
+            return None;
+        }
+
+        if let Some((punct, mut offset)) = self.punct_offset.clone() {
+            offset = offset + TextUnit::from_usize(1);
+            if offset.to_usize() < punct.text().len() {
+                return Some(SynToken::Punch(punct, offset));
+            }
+        }
+
+        let token = if curr.kind().is_punct() {
+            SynToken::Punch(curr, TextUnit::from_usize(0))
+        } else {
+            SynToken::Ordiniary(curr)
+        };
+        Some(token)
+    }
+
+    fn id_alloc(&mut self) -> &mut TokenIdAlloc {
+        &mut self.id_alloc
     }
 }

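The refactoring above is the template-method pattern expressed with trait default methods: the conversion loop lives once in TokenConvertor (`go`/`collect_leaf`), and each implementor only supplies token access (`bump`/`peek`/`convert_doc_comment`/`id_alloc`) over its own source. A minimal self-contained sketch of the same pattern — toy types and names (MiniConvertor, SliceConvertor) invented for illustration, not rust-analyzer's API:

    // The default method drives the loop; implementors only supply
    // token access, mirroring TokenConvertor::go above.
    trait MiniConvertor {
        type Token: std::fmt::Debug;

        fn bump(&mut self) -> Option<Self::Token>;
        fn peek(&self) -> Option<&Self::Token>;

        // Shared driver: drain the source into a flat output.
        fn go(&mut self) -> Vec<String> {
            let mut out = Vec::new();
            while self.peek().is_some() {
                if let Some(tok) = self.bump() {
                    out.push(format!("{:?}", tok));
                }
            }
            out
        }
    }

    // One implementor over a flat slice, analogous to RawConvertor.
    struct SliceConvertor<'a> {
        inner: std::slice::Iter<'a, u32>,
    }

    impl<'a> MiniConvertor for SliceConvertor<'a> {
        type Token = u32;

        fn bump(&mut self) -> Option<u32> {
            self.inner.next().copied()
        }
        fn peek(&self) -> Option<&u32> {
            self.inner.as_slice().first()
        }
    }

    fn main() {
        let data = [1, 2, 3];
        let mut c = SliceConvertor { inner: data.iter() };
        assert_eq!(c.go(), ["1", "2", "3"]);
    }

The asymmetry between the two real implementors is what SrcToken papers over: RawConvertor's tokens are flat lexer tokens plus their text, while Convertor walks SyntaxTokens and must split multi-character punctuation, which it models as SynToken::Punch carrying an offset into the token text.
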
diff --git a/crates/ra_mbe/src/tests.rs b/crates/ra_mbe/src/tests.rs
index 966af1d12..a3f242e49 100644
--- a/crates/ra_mbe/src/tests.rs
+++ b/crates/ra_mbe/src/tests.rs
@@ -1449,8 +1449,8 @@ impl MacroFixture {
         let macro_invocation =
             source_file.syntax().descendants().find_map(ast::MacroCall::cast).unwrap();

-        let (invocation_tt, _) =
-            ast_to_token_tree(&macro_invocation.token_tree().unwrap()).unwrap();
+        let (invocation_tt, _) = ast_to_token_tree(&macro_invocation.token_tree().unwrap())
+            .ok_or_else(|| ExpandError::ConversionError)?;

         self.rules.expand(&invocation_tt).result()
     }
@@ -1694,5 +1694,5 @@ fn test_expand_bad_literal() {
         macro_rules! foo { ($i:literal) => {}; }
     "#,
     )
-    .assert_expand_err(r#"foo!(&k");"#, &ExpandError::BindingError("".to_string()));
+    .assert_expand_err(r#"foo!(&k");"#, &ExpandError::ConversionError);
 }
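
The tests.rs change swaps a panic for error propagation: ast_to_token_tree returns an Option, and Option::ok_or_else lifts the None case into a Result so `?` can surface it as ExpandError::ConversionError — the same variant the updated test_expand_bad_literal asserts. The std mechanism in isolation (ExpandError here is a local stand-in, not mbe's actual enum):

    #[derive(Debug, PartialEq)]
    enum ExpandError {
        ConversionError,
    }

    // `ok_or_else` maps None to the given error; `?` then propagates it.
    fn convert(input: Option<u32>) -> Result<u32, ExpandError> {
        let value = input.ok_or_else(|| ExpandError::ConversionError)?;
        Ok(value)
    }

    fn main() {
        assert_eq!(convert(Some(1)), Ok(1));
        assert_eq!(convert(None), Err(ExpandError::ConversionError));
    }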