diff options
-rw-r--r-- | src/parser/event_parser/grammar.rs | 2 | ||||
-rw-r--r-- | src/parser/event_parser/parser.rs | 29 |
2 files changed, 20 insertions, 11 deletions
diff --git a/src/parser/event_parser/grammar.rs b/src/parser/event_parser/grammar.rs
index d657ee1cd..f676a183c 100644
--- a/src/parser/event_parser/grammar.rs
+++ b/src/parser/event_parser/grammar.rs
@@ -4,7 +4,7 @@ use syntax_kinds::*;
4 | 4 | ||
5 | // Items // | 5 | // Items // |
6 | 6 | ||
7 | pub fn file(p: &mut Parser) { | 7 | pub(crate) fn file(p: &mut Parser) { |
8 | node(p, FILE, |p| { | 8 | node(p, FILE, |p| { |
9 | p.optional(SHEBANG); | 9 | p.optional(SHEBANG); |
10 | inner_attributes(p); | 10 | inner_attributes(p); |
diff --git a/src/parser/event_parser/parser.rs b/src/parser/event_parser/parser.rs
index 923d0bd5a..f0d1d358b 100644
--- a/src/parser/event_parser/parser.rs
+++ b/src/parser/event_parser/parser.rs
@@ -3,9 +3,15 @@ use super::{Event};
3 | use super::super::is_insignificant; | 3 | use super::super::is_insignificant; |
4 | use syntax_kinds::{L_CURLY, R_CURLY, ERROR}; | 4 | use syntax_kinds::{L_CURLY, R_CURLY, ERROR}; |
5 | 5 | ||
6 | pub struct Parser<'t> { | 6 | pub(crate) const EOF: SyntaxKind = SyntaxKind(10000); |
7 | |||
8 | |||
9 | pub(crate) struct Parser<'t> { | ||
10 | #[allow(unused)] | ||
7 | text: &'t str, | 11 | text: &'t str, |
8 | non_ws_tokens: Vec<(Token, TextUnit)>, | 12 | #[allow(unused)] |
13 | start_offsets: Vec<TextUnit>, | ||
14 | tokens: Vec<Token>, // non-whitespace tokens | ||
9 | 15 | ||
10 | pos: usize, | 16 | pos: usize, |
11 | events: Vec<Event>, | 17 | events: Vec<Event>, |
@@ -16,18 +22,21 @@ pub struct Parser<'t> {
16 | 22 | ||
17 | impl<'t> Parser<'t> { | 23 | impl<'t> Parser<'t> { |
18 | pub(crate) fn new(text: &'t str, raw_tokens: &'t [Token]) -> Parser<'t> { | 24 | pub(crate) fn new(text: &'t str, raw_tokens: &'t [Token]) -> Parser<'t> { |
19 | let mut non_ws_tokens = Vec::new(); | 25 | let mut tokens = Vec::new(); |
26 | let mut start_offsets = Vec::new(); | ||
20 | let mut len = TextUnit::new(0); | 27 | let mut len = TextUnit::new(0); |
21 | for &token in raw_tokens.iter() { | 28 | for &token in raw_tokens.iter() { |
22 | if !is_insignificant(token.kind) { | 29 | if !is_insignificant(token.kind) { |
23 | non_ws_tokens.push((token, len)) | 30 | tokens.push(token); |
31 | start_offsets.push(len); | ||
24 | } | 32 | } |
25 | len += token.len; | 33 | len += token.len; |
26 | } | 34 | } |
27 | 35 | ||
28 | Parser { | 36 | Parser { |
29 | text, | 37 | text, |
30 | non_ws_tokens, | 38 | start_offsets, |
39 | tokens, | ||
31 | 40 | ||
32 | pos: 0, | 41 | pos: 0, |
33 | events: Vec::new(), | 42 | events: Vec::new(), |
@@ -42,11 +51,11 @@ impl<'t> Parser<'t> {
42 | } | 51 | } |
43 | 52 | ||
44 | pub(crate) fn is_eof(&self) -> bool { | 53 | pub(crate) fn is_eof(&self) -> bool { |
45 | if self.pos == self.non_ws_tokens.len() { | 54 | if self.pos == self.tokens.len() { |
46 | return true | 55 | return true |
47 | } | 56 | } |
48 | if let Some(limit) = self.curly_limit { | 57 | if let Some(limit) = self.curly_limit { |
49 | let token = self.non_ws_tokens[self.pos].0; | 58 | let token = self.tokens[self.pos]; |
50 | return limit == self.curly_level && token.kind == R_CURLY; | 59 | return limit == self.curly_level && token.kind == R_CURLY; |
51 | } | 60 | } |
52 | false | 61 | false |
@@ -68,7 +77,7 @@ impl<'t> Parser<'t> {
68 | if self.is_eof() { | 77 | if self.is_eof() { |
69 | return None; | 78 | return None; |
70 | } | 79 | } |
71 | let token = self.non_ws_tokens[self.pos].0; | 80 | let token = self.tokens[self.pos]; |
72 | Some(token.kind) | 81 | Some(token.kind) |
73 | } | 82 | } |
74 | 83 | ||
@@ -85,10 +94,10 @@ impl<'t> Parser<'t> {
85 | } | 94 | } |
86 | 95 | ||
87 | pub(crate) fn lookahead(&self, kinds: &[SyntaxKind]) -> bool { | 96 | pub(crate) fn lookahead(&self, kinds: &[SyntaxKind]) -> bool { |
88 | if self.non_ws_tokens[self.pos..].len() < kinds.len() { | 97 | if self.tokens[self.pos..].len() < kinds.len() { |
89 | return false | 98 | return false |
90 | } | 99 | } |
91 | kinds.iter().zip(self.non_ws_tokens[self.pos..].iter().map(|&(t, _)| t.kind)) | 100 | kinds.iter().zip(self.tokens[self.pos..].iter().map(|t| t.kind)) |
92 | .all(|(&k1, k2)| k1 == k2) | 101 | .all(|(&k1, k2)| k1 == k2) |
93 | } | 102 | } |
94 | 103 | ||