path: root/crates/ra_syntax/src/parsing/input.rs
use ra_parser::TokenSource;

use crate::{
    SyntaxKind, SyntaxKind::EOF, TextRange, TextUnit,
    parsing::lexer::Token,
};

impl<'t> TokenSource for ParserInput<'t> {
    fn token_kind(&self, pos: usize) -> SyntaxKind {
        if pos >= self.tokens.len() {
            return EOF;
        }
        self.tokens[pos].kind
    }
    fn is_token_joint_to_next(&self, pos: usize) -> bool {
        if pos + 1 >= self.tokens.len() {
            return true;
        }
        // Two tokens are joint when no trivia separates them, i.e. the first
        // token ends exactly where the next one starts.
        self.start_offsets[pos] + self.tokens[pos].len == self.start_offsets[pos + 1]
    }
    fn is_keyword(&self, pos: usize, kw: &str) -> bool {
        if pos >= self.tokens.len() {
            return false;
        }
        let range = TextRange::offset_len(self.start_offsets[pos], self.tokens[pos].len);
        self.text[range] == *kw
    }
}

pub(crate) struct ParserInput<'t> {
    text: &'t str,
    /// Start offset of each token (excluding whitespace and comments):
    /// ```non-rust
    /// struct Foo;
    /// ^      ^  ^
    /// 0      7  10
    /// ```
    /// (token, start_offset): `[(struct, 0), (Foo, 7), (;, 10)]`
    start_offsets: Vec<TextUnit>,
    /// Non-whitespace, non-comment tokens:
    /// ```non-rust
    /// struct Foo {}
    /// ^^^^^^ ^^^ ^^
    /// ```
    /// tokens: `[struct, Foo, {, }]`
    tokens: Vec<Token>,
}

impl<'t> ParserInput<'t> {
    /// Generate parser input from raw tokens, skipping comments and whitespace.
    pub fn new(text: &'t str, raw_tokens: &'t [Token]) -> ParserInput<'t> {
        let mut tokens = Vec::new();
        let mut start_offsets = Vec::new();
        let mut len = 0.into();
        for &token in raw_tokens.iter() {
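            // Trivia (whitespace and comments) still advances the offset below,
            // but is not recorded as a parser-visible token.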
            if !token.kind.is_trivia() {
                tokens.push(token);
                start_offsets.push(len);
            }
            len += token.len;
        }

        ParserInput { text, start_offsets, tokens }
    }
}
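
// A minimal test sketch, not part of the original file: it illustrates how
// `ParserInput::new` drops trivia while keeping offsets aligned with `text`.
// It assumes `crate::parsing::lexer::tokenize` is available and returns the
// raw `Token`s (including trivia) that `new` expects; adjust the import if
// the lexer entry point differs.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::parsing::lexer::tokenize;

    #[test]
    fn trivia_is_skipped_and_offsets_line_up() {
        let text = "struct Foo;";
        let raw_tokens = tokenize(text);
        let input = ParserInput::new(text, &raw_tokens);

        // Only `struct`, `Foo` and `;` survive; the space is trivia.
        assert!(input.is_keyword(0, "struct"));
        // `struct` and `Foo` are separated by a space, so they are not joint.
        assert!(!input.is_token_joint_to_next(0));
        // `Foo` and `;` touch, so they are joint.
        assert!(input.is_token_joint_to_next(1));
        // Positions past the end report EOF.
        assert_eq!(input.token_kind(3), EOF);
    }
}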