From 4de3c97b2afea55834cd16368f950133459d8c73 Mon Sep 17 00:00:00 2001
From: Christopher Durham
Date: Sat, 27 Jan 2018 18:31:23 -0500
Subject: Enforce rustfmt format

---
 .travis.yml                                    |  11 ++
 Cargo.toml                                     |   2 +-
 rustfmt.toml                                   |   0
 src/bin/gen.rs                                 |  21 ++--
 src/bin/parse-rust.rs                          |   2 +-
 src/lexer/classes.rs                           |   9 +-
 src/lexer/comments.rs                          |   5 +-
 src/lexer/mod.rs                               | 143 ++++++++++++++-----------
 src/lexer/numbers.rs                           |   6 +-
 src/lexer/ptr.rs                               |  11 +-
 src/lexer/strings.rs                           |  33 +++---
 src/lib.rs                                     |   8 +-
 src/parser/event_parser/grammar/attributes.rs  |  15 +--
 src/parser/event_parser/grammar/expressions.rs |   8 +-
 src/parser/event_parser/grammar/items.rs       |  38 ++-----
 src/parser/event_parser/grammar/mod.rs         |  25 ++---
 src/parser/event_parser/grammar/paths.rs       |   4 +-
 src/parser/event_parser/grammar/types.rs       |   2 +-
 src/parser/event_parser/mod.rs                 |   2 +-
 src/parser/event_parser/parser.rs              |  46 +++++---
 src/parser/mod.rs                              |  30 +++---
 src/text.rs                                    |   9 +-
 src/tree/file_builder.rs                       |  32 +++---
 src/tree/mod.rs                                |  44 ++++----
 tests/lexer.rs                                 |  15 ++-
 tests/parser.rs                                |  15 ++-
 26 files changed, 280 insertions(+), 256 deletions(-)
 create mode 100644 rustfmt.toml

diff --git a/.travis.yml b/.travis.yml
index a493f815a..afca18dea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,14 @@
 language: rust
 rust:
   - stable
+
+matrix:
+  include:
+    - rust: nightly-2018-01-26
+      before_script:
+        - rustup component add rustfmt-preview
+      script:
+        - cargo fmt -- --write-mode=diff
+
+script:
+  - cargo test
diff --git a/Cargo.toml b/Cargo.toml
index 802a99b37..e5caa5d12 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,4 +13,4 @@ file = "1.1.1"
 ron = "0.1.5"
 
 [dev-dependencies]
-testutils = { path = "./tests/testutils" }
\ No newline at end of file
+testutils = { path = "./tests/testutils" }
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/bin/gen.rs b/src/bin/gen.rs
index 5ebf3e2e8..8f6402f5c 100644
--- a/src/bin/gen.rs
+++ b/src/bin/gen.rs
@@ -2,8 +2,8 @@ extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 
-extern crate ron;
 extern crate file;
+extern crate ron;
 
 use std::path::PathBuf;
 use std::fmt::Write;
@@ -33,11 +33,12 @@ impl Grammar {
         acc.push_str("use tree::{SyntaxKind, SyntaxInfo};\n");
         acc.push_str("\n");
 
-        let syntax_kinds: Vec<String> =
-            self.keywords.iter().map(|kw| kw_token(kw))
-                .chain(self.tokens.iter().cloned())
-                .chain(self.nodes.iter().cloned())
-                .collect();
+        let syntax_kinds: Vec<String> = self.keywords
+            .iter()
+            .map(|kw| kw_token(kw))
+            .chain(self.tokens.iter().cloned())
+            .chain(self.nodes.iter().cloned())
+            .collect();
 
         for (idx, kind) in syntax_kinds.iter().enumerate() {
             let sname = scream(kind);
@@ -48,7 +49,11 @@ impl Grammar {
             ).unwrap();
         }
         acc.push_str("\n");
-        write!(acc, "static INFOS: [SyntaxInfo; {}] = [\n", syntax_kinds.len()).unwrap();
+        write!(
+            acc,
+            "static INFOS: [SyntaxInfo; {}] = [\n",
+            syntax_kinds.len()
+        ).unwrap();
         for kind in syntax_kinds.iter() {
             let sname = scream(kind);
             write!(
@@ -91,4 +96,4 @@ fn scream(word: &str) -> String {
 
 fn kw_token(keyword: &str) -> String {
     format!("{}_KW", scream(keyword))
-}
\ No newline at end of file
+}
diff --git a/src/bin/parse-rust.rs b/src/bin/parse-rust.rs
index 3c13e732e..af1325bfc 100644
--- a/src/bin/parse-rust.rs
+++ b/src/bin/parse-rust.rs
@@ -2,7 +2,7 @@ extern crate libsyntax2;
 
 use std::io::Read;
 
-use libsyntax2::{tokenize, parse};
+use libsyntax2::{parse, tokenize};
 use libsyntax2::utils::dump_tree;
 
 fn main() {
diff --git a/src/lexer/classes.rs b/src/lexer/classes.rs
index 4235d2648..7fed008af 100644
--- a/src/lexer/classes.rs
+++ b/src/lexer/classes.rs
@@ -1,17 +1,12 @@
 use unicode_xid::UnicodeXID;
 
 pub fn is_ident_start(c: char) -> bool {
-    (c >= 'a' && c <= 'z')
-        || (c >= 'A' && c <= 'Z')
-        || c == '_'
+    (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_'
         || (c > '\x7f' && UnicodeXID::is_xid_start(c))
 }
 
 pub fn is_ident_continue(c: char) -> bool {
-    (c >= 'a' && c <= 'z')
-        || (c >= 'A' && c <= 'Z')
-        || (c >= '0' && c <= '9')
-        || c == '_'
+    (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_'
         || (c > '\x7f' && UnicodeXID::is_xid_continue(c))
 }
 
diff --git a/src/lexer/comments.rs b/src/lexer/comments.rs
index 79782cc5b..b70f2c6c6 100644
--- a/src/lexer/comments.rs
+++ b/src/lexer/comments.rs
@@ -1,6 +1,6 @@
 use lexer::ptr::Ptr;
 
-use {SyntaxKind};
+use SyntaxKind;
 use syntax_kinds::*;
 
 pub(crate) fn scan_shebang(ptr: &mut Ptr) -> bool {
@@ -23,7 +23,6 @@ pub(crate) fn scan_comment(ptr: &mut Ptr) -> Option<SyntaxKind> {
     }
 }
 
-
 fn bump_until_eol(ptr: &mut Ptr) {
     loop {
         if ptr.next_is('\n') || ptr.next_is('\r') && ptr.nnext_is('\n') {
@@ -33,4 +32,4 @@ fn bump_until_eol(ptr: &mut Ptr) {
             break;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/src/lexer/mod.rs b/src/lexer/mod.rs
index 842059a42..f62dfc507 100644
--- a/src/lexer/mod.rs
+++ b/src/lexer/mod.rs
@@ -1,4 +1,4 @@
-use {Token, SyntaxKind};
+use {SyntaxKind, Token};
 use syntax_kinds::*;
 
 mod ptr;
@@ -11,10 +11,11 @@ mod numbers;
 use self::numbers::scan_number;
 
 mod strings;
-use self::strings::{is_string_literal_start, scan_char, scan_byte_char_or_string, scan_string, scan_raw_string};
+use self::strings::{is_string_literal_start, scan_byte_char_or_string, scan_char, scan_raw_string,
+                    scan_string};
 
 mod comments;
-use self::comments::{scan_shebang, scan_comment};
+use self::comments::{scan_comment, scan_shebang};
 
 pub fn tokenize(text: &str) -> Vec<Token> {
     let mut text = text;
@@ -45,10 +46,10 @@ fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
     match c {
         '#' => if scan_shebang(ptr) {
             return SHEBANG;
-        }
+        },
         '/' => if let Some(kind) = scan_comment(ptr) {
             return kind;
-        }
+        },
         _ => (),
     }
 
@@ -89,79 +90,91 @@ fn next_token_inner(c: char, ptr: &mut Ptr) -> SyntaxKind {
         '%' => return PERCENT,
 
         // Multi-byte tokens.
-        '.' => return match (ptr.next(), ptr.nnext()) {
-            (Some('.'), Some('.')) => {
-                ptr.bump();
-                ptr.bump();
-                DOTDOTDOT
-            },
-            (Some('.'), Some('=')) => {
-                ptr.bump();
-                ptr.bump();
-                DOTDOTEQ
-            },
-            (Some('.'), _) => {
-                ptr.bump();
-                DOTDOT
-            },
-            _ => DOT
-        },
-        ':' => return match ptr.next() {
-            Some(':') => {
-                ptr.bump();
-                COLONCOLON
+        '.' => {
+            return match (ptr.next(), ptr.nnext()) {
+                (Some('.'), Some('.')) => {
+                    ptr.bump();
+                    ptr.bump();
+                    DOTDOTDOT
+                }
+                (Some('.'), Some('=')) => {
+                    ptr.bump();
+                    ptr.bump();
+                    DOTDOTEQ
+                }
+                (Some('.'), _) => {
+                    ptr.bump();
+                    DOTDOT
+                }
+                _ => DOT,
             }
-            _ => COLON
-        },
-        '=' => return match ptr.next() {
-            Some('=') => {
-                ptr.bump();
-                EQEQ
+        }
+        ':' => {
+            return match ptr.next() {
+                Some(':') => {
+                    ptr.bump();
+                    COLONCOLON
+                }
+                _ => COLON,
             }
-            Some('>') => {
-                ptr.bump();
-                FAT_ARROW
+        }
+        '=' => {
+            return match ptr.next() {
+                Some('=') => {
+                    ptr.bump();
+                    EQEQ
+                }
+                Some('>') => {
+                    ptr.bump();
+                    FAT_ARROW
+                }
+                _ => EQ,
             }
-            _ => EQ,
-        },
-        '!' => return match ptr.next() {
-            Some('=') => {
+        }
+        '!' => {
+            return match ptr.next() {
+                Some('=') => {
+                    ptr.bump();
+                    NEQ
+                }
+                _ => EXCL,
+            }
+        }
+        '-' => {
+            return if ptr.next_is('>') {
                 ptr.bump();
-                NEQ
+                THIN_ARROW
+            } else {
+                MINUS
             }
-            _ => EXCL,
-        },
-        '-' => return if ptr.next_is('>') {
-            ptr.bump();
-            THIN_ARROW
-        } else {
-            MINUS
-        },
+        }
 
         // If the character is an ident start not followed by another single
         // quote, then this is a lifetime name:
-        '\'' => return if ptr.next_is_p(is_ident_start) && !ptr.nnext_is('\'') {
-            ptr.bump();
-            while ptr.next_is_p(is_ident_continue) {
+        '\'' => {
+            return if ptr.next_is_p(is_ident_start) && !ptr.nnext_is('\'') {
                 ptr.bump();
+                while ptr.next_is_p(is_ident_continue) {
+                    ptr.bump();
+                }
+                // lifetimes shouldn't end with a single quote
+                // if we find one, then this is an invalid character literal
+                if ptr.next_is('\'') {
+                    ptr.bump();
+                    return CHAR; // TODO: error reporting
+                }
+                LIFETIME
+            } else {
+                scan_char(ptr);
+                scan_literal_suffix(ptr);
+                CHAR
             }
-            // lifetimes shouldn't end with a single quote
-            // if we find one, then this is an invalid character literal
-            if ptr.next_is('\'') {
-                ptr.bump();
-                return CHAR; // TODO: error reporting
-            }
-            LIFETIME
-        } else {
-            scan_char(ptr);
-            scan_literal_suffix(ptr);
-            CHAR
-        },
+        }
         'b' => {
             let kind = scan_byte_char_or_string(ptr);
             scan_literal_suffix(ptr);
-            return kind
-        },
+            return kind;
+        }
         '"' => {
             scan_string(ptr);
             scan_literal_suffix(ptr);
diff --git a/src/lexer/numbers.rs b/src/lexer/numbers.rs
index 4c7edfe1c..95e42246f 100644
--- a/src/lexer/numbers.rs
+++ b/src/lexer/numbers.rs
@@ -1,7 +1,7 @@
 use lexer::ptr::Ptr;
 use lexer::classes::*;
 
-use {SyntaxKind};
+use SyntaxKind;
 use syntax_kinds::*;
 
 pub(crate) fn scan_number(c: char, ptr: &mut Ptr) -> SyntaxKind {
@@ -49,10 +49,10 @@ fn scan_digits(ptr: &mut Ptr, allow_hex: bool) {
             '_' | '0'...'9' => {
                 ptr.bump();
             }
-            'a'...'f' | 'A' ... 'F' if allow_hex => {
+            'a'...'f' | 'A'...'F' if allow_hex => {
                 ptr.bump();
             }
-            _ => return
+            _ => return,
         }
     }
 }
diff --git a/src/lexer/ptr.rs b/src/lexer/ptr.rs
index ff6ef11fc..99d55b283 100644
--- a/src/lexer/ptr.rs
+++ b/src/lexer/ptr.rs
@@ -1,4 +1,4 @@
-use {TextUnit};
+use TextUnit;
 
 use std::str::Chars;
 
@@ -9,7 +9,10 @@ pub(crate) struct Ptr<'s> {
 
 impl<'s> Ptr<'s> {
     pub fn new(text: &'s str) -> Ptr<'s> {
-        Ptr { text, len: TextUnit::new(0) }
+        Ptr {
+            text,
+            len: TextUnit::new(0),
+        }
     }
 
     pub fn into_len(self) -> TextUnit {
@@ -53,7 +56,7 @@ impl<'s> Ptr<'s> {
             match self.next() {
                 Some(c) if pred(c) => {
                     self.bump();
-                },
+                }
                 _ => return,
             }
         }
@@ -66,6 +69,6 @@ impl<'s> Ptr<'s> {
 
     fn chars(&self) -> Chars {
         let len: u32 = self.len.into();
-        self.text[len as usize ..].chars()
+        self.text[len as usize..].chars()
     }
 }
diff --git a/src/lexer/strings.rs b/src/lexer/strings.rs
index 116d31760..00a84ec85 100644
--- a/src/lexer/strings.rs
+++ b/src/lexer/strings.rs
@@ -1,17 +1,17 @@
-use {SyntaxKind};
+use SyntaxKind;
 
 use syntax_kinds::*;
 use lexer::ptr::Ptr;
 
 pub(crate) fn is_string_literal_start(c: char, c1: Option<char>, c2: Option<char>) -> bool {
     match (c, c1, c2) {
-        ('r', Some('"'), _) |
-        ('r', Some('#'), _) |
-        ('b', Some('"'), _) |
-        ('b', Some('\''), _) |
-        ('b', Some('r'), Some('"')) |
-        ('b', Some('r'), Some('#')) => true,
-        _ => false
+        ('r', Some('"'), _)
+        | ('r', Some('#'), _)
+        | ('b', Some('"'), _)
+        | ('b', Some('\''), _)
+        | ('b', Some('r'), Some('"'))
+        | ('b', Some('r'), Some('#')) => true,
+        _ => false,
     }
 }
 
@@ -50,20 +50,20 @@ pub(crate) fn scan_byte_char_or_string(ptr: &mut Ptr) -> SyntaxKind {
 pub(crate) fn scan_string(ptr: &mut Ptr) {
     while let Some(c) = ptr.bump() {
         if c == '"' {
-            return
+            return;
         }
     }
 }
 
 pub(crate) fn scan_raw_string(ptr: &mut Ptr) {
     if !ptr.next_is('"') {
-        return
+        return;
     }
     ptr.bump();
 
     while let Some(c) = ptr.bump() {
         if c == '"' {
-            return
+            return;
         }
     }
 }
@@ -71,32 +71,32 @@ pub(crate) fn scan_raw_string(ptr: &mut Ptr) {
 fn scan_byte(ptr: &mut Ptr) {
     if ptr.next_is('\'') {
         ptr.bump();
-        return
+        return;
     }
     ptr.bump();
     if ptr.next_is('\'') {
         ptr.bump();
-        return
+        return;
     }
 }
 
 fn scan_byte_string(ptr: &mut Ptr) {
     while let Some(c) = ptr.bump() {
         if c == '"' {
-            return
+            return;
         }
     }
 }
 
 fn scan_raw_byte_string(ptr: &mut Ptr) {
     if !ptr.next_is('"') {
-        return
+        return;
     }
     ptr.bump();
 
     while let Some(c) = ptr.bump() {
         if c == '"' {
-            return
+            return;
         }
     }
 }
@@ -105,4 +105,3 @@ fn scan_char_or_byte(ptr: &mut Ptr) {
     //FIXME: deal with escape sequencies
     ptr.bump();
 }
-
diff --git a/src/lib.rs b/src/lib.rs
index 7fd9e547a..39b01a1cb 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,9 +5,10 @@ mod tree;
 mod lexer;
 mod parser;
 
+#[cfg_attr(rustfmt, rustfmt_skip)]
 pub mod syntax_kinds;
 
-pub use text::{TextUnit, TextRange};
-pub use tree::{SyntaxKind, Token, FileBuilder, Sink, File, Node};
+pub use text::{TextRange, TextUnit};
+pub use tree::{File, FileBuilder, Node, Sink, SyntaxKind, Token};
 pub use lexer::{next_token, tokenize};
 pub use parser::parse;
 
@@ -25,7 +26,8 @@ pub mod utils {
             buff.push_str(&String::from("  ").repeat(level));
             write!(buff, "{:?}\n", node).unwrap();
             let my_errors = node.errors().filter(|e| e.after_child().is_none());
-            let parent_errors = node.parent().into_iter()
+            let parent_errors = node.parent()
+                .into_iter()
                 .flat_map(|n| n.errors())
                 .filter(|e| e.after_child() == Some(node));
 
diff --git a/src/parser/event_parser/grammar/attributes.rs b/src/parser/event_parser/grammar/attributes.rs
index 045840059..8bf04afce 100644
--- a/src/parser/event_parser/grammar/attributes.rs
+++ b/src/parser/event_parser/grammar/attributes.rs
@@ -12,8 +12,7 @@ pub(super) fn outer_attributes(p: &mut Parser) {
     }
 }
 
-
-fn attribute(p: &mut Parser, inner: bool){
+fn attribute(p: &mut Parser, inner: bool) {
     let attr = p.start();
     assert!(p.at(POUND));
     p.bump();
@@ -38,9 +37,7 @@ fn meta_item(p: &mut Parser) {
             EQ => {
                 p.bump();
                 if !expressions::literal(p) {
-                    p.error()
-                        .message("expected literal")
-                        .emit();
+                    p.error().message("expected literal").emit();
                 }
             }
             L_PAREN => meta_item_arg_list(p),
@@ -48,9 +45,7 @@ fn meta_item(p: &mut Parser) {
         }
         meta_item.complete(p, META_ITEM);
     } else {
-        p.error()
-            .message("expected attribute value")
-            .emit()
+        p.error().message("expected attribute value").emit()
     }
 }
 
@@ -73,8 +68,8 @@ fn meta_item_arg_list(p: &mut Parser) {
                 p.error().message(message).emit();
                 p.bump();
                 err.complete(p, ERROR);
-                continue
-            }
+                continue;
+            },
         }
         if !p.at(R_PAREN) {
             p.expect(COMMA);
diff --git a/src/parser/event_parser/grammar/expressions.rs b/src/parser/event_parser/grammar/expressions.rs
index a943b8c81..c81dc6c35 100644
--- a/src/parser/event_parser/grammar/expressions.rs
+++ b/src/parser/event_parser/grammar/expressions.rs
@@ -2,15 +2,13 @@ use super::*;
 
 pub(super) fn literal(p: &mut Parser) -> bool {
     match p.current() {
-        TRUE_KW | FALSE_KW |
-        INT_NUMBER | FLOAT_NUMBER |
-        BYTE | CHAR |
-        STRING | RAW_STRING | BYTE_STRING | RAW_BYTE_STRING => {
+        TRUE_KW | FALSE_KW | INT_NUMBER | FLOAT_NUMBER | BYTE | CHAR | STRING | RAW_STRING
+        | BYTE_STRING | RAW_BYTE_STRING => {
             let lit = p.start();
             p.bump();
             lit.complete(p, LITERAL);
             true
         }
-        _ => false
+        _ => false,
     }
 }
diff --git a/src/parser/event_parser/grammar/items.rs b/src/parser/event_parser/grammar/items.rs
index 7706690cc..e569e5047 100644
--- a/src/parser/event_parser/grammar/items.rs
+++ b/src/parser/event_parser/grammar/items.rs
@@ -7,15 +7,8 @@ pub(super) fn mod_contents(p: &mut Parser) {
     }
 }
 
-pub(super) const ITEM_FIRST: TokenSet = token_set![
-    EXTERN_KW,
-    MOD_KW,
-    USE_KW,
-    STRUCT_KW,
-    FN_KW,
-    PUB_KW,
-    POUND,
-];
+pub(super) const ITEM_FIRST: TokenSet =
+    token_set![EXTERN_KW, MOD_KW, USE_KW, STRUCT_KW, FN_KW, PUB_KW, POUND,];
 
 fn item(p: &mut Parser) {
     let item = p.start();
@@ -48,7 +41,7 @@ fn item(p: &mut Parser) {
             let message = if err_token == SEMI {
                 //TODO: if the item is incomplete, this message is misleading
                 "expected item, found `;`\n\
-                consider removing this semicolon"
+                 consider removing this semicolon"
             } else {
                 "expected item"
             };
@@ -76,10 +69,9 @@ fn struct_item(p: &mut Parser) {
             return;
         }
         L_CURLY => named_fields(p),
-        _ => { //TODO: special case `(` error message
-            p.error()
-                .message("expected `;` or `{`")
-                .emit();
+        _ => {
+            //TODO: special case `(` error message
+            p.error().message("expected `;` or `{`").emit();
             return;
         }
     }
@@ -94,9 +86,7 @@ fn struct_item(p: &mut Parser) {
             p.expect(SEMI);
         }
         _ => {
-            p.error()
-                .message("expected `;`, `{`, or `(`")
-                .emit();
+            p.error().message("expected `;`, `{`, or `(`").emit();
             return;
         }
     }
@@ -177,7 +167,7 @@ fn use_item(p: &mut Parser) {
     use_tree(p);
     p.expect(SEMI);
 
-    fn use_tree(p: &mut Parser){
+    fn use_tree(p: &mut Parser) {
         let la = p.raw_lookahead(1);
         let m = p.start();
         match (p.current(), la) {
@@ -209,9 +199,7 @@ fn use_item(p: &mut Parser) {
                 L_CURLY => nested_trees(p),
                 _ => {
                     // is this unreachable?
-                    p.error()
-                        .message("expected `{` or `*`")
-                        .emit();
+                    p.error().message("expected `{` or `*`").emit();
                 }
             }
         }
@@ -222,7 +210,7 @@ fn use_item(p: &mut Parser) {
                 m.abandon(p);
                 p.err_and_bump("expected one of `*`, `::`, `{`, `self`, `super`, `indent`");
                 return;
-            },
+            }
         }
         m.complete(p, USE_TREE);
     }
@@ -240,13 +228,9 @@ fn use_item(p: &mut Parser) {
     }
 }
 
-
 fn fn_item(p: &mut Parser) {
     assert!(p.at(FN_KW));
     p.bump();
-    p.expect(IDENT) && p.expect(L_PAREN) && p.expect(R_PAREN)
-        && p.curly_block(|_| ());
+    p.expect(IDENT) && p.expect(L_PAREN) && p.expect(R_PAREN) && p.curly_block(|_| ());
 }
 
-
-
diff --git a/src/parser/event_parser/grammar/mod.rs b/src/parser/event_parser/grammar/mod.rs
index 6e4f72096..c6ab1fbe2 100644
--- a/src/parser/event_parser/grammar/mod.rs
+++ b/src/parser/event_parser/grammar/mod.rs
@@ -1,5 +1,5 @@
 use super::parser::{Parser, TokenSet};
-use {SyntaxKind};
+use SyntaxKind;
 use tree::EOF;
 use syntax_kinds::*;
 
@@ -29,7 +29,7 @@ fn visibility(p: &mut Parser) {
                 }
                 p.expect(R_PAREN);
             }
-            _ => ()
+            _ => (),
         }
     }
     vis.complete(p, VISIBILITY);
@@ -53,9 +53,7 @@ impl<'p> Parser<'p> {
 
     fn err_and_bump(&mut self, message: &str) {
         let err = self.start();
-        self.error()
-            .message(message)
-            .emit();
+        self.error().message(message).emit();
         self.bump();
         err.complete(self, ERROR);
     }
@@ -65,15 +63,16 @@ impl<'p> Parser<'p> {
             self.bump();
             true
         } else {
-            self.error()
-                .message(format!("expected {:?}", kind))
-                .emit();
+            self.error().message(format!("expected {:?}", kind)).emit();
            false
         }
     }
 
     fn eat(&mut self, kind: SyntaxKind) -> bool {
-        self.current() == kind && { self.bump(); true }
+        self.current() == kind && {
+            self.bump();
+            true
+        }
     }
 }
 
@@ -94,8 +93,7 @@ impl Lookahead for SyntaxKind {
 
 impl Lookahead for [SyntaxKind; 2] {
     fn is_ahead(self, p: &Parser) -> bool {
-        p.current() == self[0]
-            && p.raw_lookahead(1) == self[1]
+        p.current() == self[0] && p.raw_lookahead(1) == self[1]
     }
 
     fn consume(p: &mut Parser) {
@@ -106,9 +104,7 @@ impl Lookahead for [SyntaxKind; 2] {
 
 impl Lookahead for [SyntaxKind; 3] {
     fn is_ahead(self, p: &Parser) -> bool {
-        p.current() == self[0]
-            && p.raw_lookahead(1) == self[1]
-            && p.raw_lookahead(2) == self[2]
+        p.current() == self[0] && p.raw_lookahead(1) == self[1] && p.raw_lookahead(2) == self[2]
     }
 
     fn consume(p: &mut Parser) {
@@ -130,5 +126,4 @@ impl<'a> Lookahead for AnyOf<'a> {
     fn consume(p: &mut Parser) {
         p.bump();
     }
-
 }
diff --git a/src/parser/event_parser/grammar/paths.rs b/src/parser/event_parser/grammar/paths.rs
index b58c59aef..4e028073a 100644
--- a/src/parser/event_parser/grammar/paths.rs
+++ b/src/parser/event_parser/grammar/paths.rs
@@ -34,9 +34,7 @@ fn path_segment(p: &mut Parser, first: bool) {
             p.bump();
         }
         _ => {
-            p.error()
-                .message("expected identifier")
-                .emit();
+            p.error().message("expected identifier").emit();
         }
     };
     segment.complete(p, PATH_SEGMENT);
diff --git a/src/parser/event_parser/grammar/types.rs b/src/parser/event_parser/grammar/types.rs
index c431643d7..1a3d44a0a 100644
--- a/src/parser/event_parser/grammar/types.rs
+++ b/src/parser/event_parser/grammar/types.rs
@@ -2,4 +2,4 @@ use super::*;
 
 pub(super) fn type_ref(p: &mut Parser) {
     p.expect(IDENT);
-}
\ No newline at end of file
+}
diff --git a/src/parser/event_parser/mod.rs b/src/parser/event_parser/mod.rs
index b9ffded9d..65aea017b 100644
--- a/src/parser/event_parser/mod.rs
+++ b/src/parser/event_parser/mod.rs
@@ -1,4 +1,4 @@
-use {Token, SyntaxKind};
+use {SyntaxKind, Token};
 
 #[macro_use]
 mod parser;
diff --git a/src/parser/event_parser/parser.rs b/src/parser/event_parser/parser.rs
index 18231e493..5ba3071cb 100644
--- a/src/parser/event_parser/parser.rs
+++ b/src/parser/event_parser/parser.rs
@@ -1,17 +1,19 @@
-use {Token, SyntaxKind, TextUnit};
+use {SyntaxKind, TextUnit, Token};
 use super::Event;
 use super::super::is_insignificant;
-use syntax_kinds::{L_CURLY, R_CURLY, ERROR};
+use syntax_kinds::{ERROR, L_CURLY, R_CURLY};
 use tree::{EOF, TOMBSTONE};
 
 pub(crate) struct Marker {
-    pos: u32
+    pos: u32,
 }
 
 impl Marker {
     pub fn complete(self, p: &mut Parser, kind: SyntaxKind) -> CompleteMarker {
         match self.event(p) {
-            &mut Event::Start { kind: ref mut slot, ..} => {
+            &mut Event::Start {
+                kind: ref mut slot, ..
+            } => {
                 *slot = kind;
             }
             _ => unreachable!(),
@@ -26,8 +28,11 @@ impl Marker {
         let idx = self.pos as usize;
         if idx == p.events.len() - 1 {
             match p.events.pop() {
-                Some(Event::Start { kind: TOMBSTONE, forward_parent: None }) => (),
-                _ => unreachable!()
+                Some(Event::Start {
+                    kind: TOMBSTONE,
+                    forward_parent: None,
+                }) => (),
+                _ => unreachable!(),
             }
         }
         ::std::mem::forget(self);
@@ -51,14 +56,17 @@ impl Drop for Marker {
 
 pub(crate) struct CompleteMarker {
-    pos: u32
+    pos: u32,
 }
 
 impl CompleteMarker {
     pub(crate) fn precede(self, p: &mut Parser) -> Marker {
         let m = p.start();
         match p.events[self.pos as usize] {
-            Event::Start { ref mut forward_parent, ..} => {
+            Event::Start {
+                ref mut forward_parent,
+                ..
+            } => {
                 *forward_parent = Some(m.pos - self.pos);
             }
             _ => unreachable!(),
         }
@@ -68,7 +76,7 @@ impl CompleteMarker {
 }
 
 pub(crate) struct TokenSet {
-    pub tokens: &'static [SyntaxKind]
+    pub tokens: &'static [SyntaxKind],
 }
 
 impl TokenSet {
@@ -90,7 +98,6 @@ macro_rules! token_set {
     };
 }
 
-
 pub(crate) struct Parser<'t> {
     #[allow(unused)]
     text: &'t str,
@@ -150,8 +157,13 @@ impl<'t> Parser<'t> {
     }
 
     pub(crate) fn start(&mut self) -> Marker {
-        let m = Marker { pos: self.events.len() as u32 };
-        self.event(Event::Start { kind: TOMBSTONE, forward_parent: None });
+        let m = Marker {
+            pos: self.events.len() as u32,
+        };
+        self.event(Event::Start {
+            kind: TOMBSTONE,
+            forward_parent: None,
+        });
         m
     }
 
@@ -168,7 +180,10 @@ impl<'t> Parser<'t> {
             _ => (),
         }
         self.pos += 1;
-        self.event(Event::Token { kind, n_raw_tokens: 1 });
+        self.event(Event::Token {
+            kind,
+            n_raw_tokens: 1,
+        });
         kind
     }
 
@@ -210,7 +225,10 @@ pub(crate) struct ErrorBuilder<'p, 't: 'p> {
 
 impl<'t, 'p> ErrorBuilder<'p, 't> {
     fn new(parser: &'p mut Parser<'t>) -> Self {
-        ErrorBuilder { message: None, parser }
+        ErrorBuilder {
+            message: None,
+            parser,
+        }
     }
 
     pub fn message<M: Into<String>>(mut self, m: M) -> Self {
diff --git a/src/parser/mod.rs b/src/parser/mod.rs
index 5ec4b8e93..d04ed1e75 100644
--- a/src/parser/mod.rs
+++ b/src/parser/mod.rs
@@ -1,4 +1,4 @@
-use {Token, File, FileBuilder, Sink, SyntaxKind};
+use {File, FileBuilder, Sink, SyntaxKind, Token};
 
 use syntax_kinds::*;
 use tree::TOMBSTONE;
@@ -6,17 +6,12 @@ use tree::TOMBSTONE;
 mod event_parser;
 use self::event_parser::Event;
 
-
 pub fn parse(text: String, tokens: &[Token]) -> File {
     let events = event_parser::parse(&text, tokens);
     from_events_to_file(text, tokens, events)
 }
 
-fn from_events_to_file(
-    text: String,
-    tokens: &[Token],
-    events: Vec<Event>,
-) -> File {
+fn from_events_to_file(text: String, tokens: &[Token], events: Vec<Event>) -> File {
     let mut builder = FileBuilder::new(text);
     let mut idx = 0;
 
@@ -26,18 +21,23 @@ fn from_events_to_file(
     for (i, event) in events.iter().enumerate() {
         if holes.last() == Some(&i) {
             holes.pop();
-            continue
+            continue;
         }
 
         match event {
-            &Event::Start { kind: TOMBSTONE, .. } => (),
+            &Event::Start {
+                kind: TOMBSTONE, ..
+            } => (),
 
             &Event::Start { .. } => {
                 forward_parents.clear();
                 let mut idx = i;
                 loop {
                     let (kind, fwd) = match events[idx] {
-                        Event::Start { kind, forward_parent } => (kind, forward_parent),
+                        Event::Start {
+                            kind,
+                            forward_parent,
+                        } => (kind, forward_parent),
                         _ => unreachable!(),
                     };
                     forward_parents.push((idx, kind));
@@ -64,8 +64,11 @@ fn from_events_to_file(
                     }
                 }
                 builder.finish_internal()
-            },
-            &Event::Token { kind: _, mut n_raw_tokens } => loop {
+            }
+            &Event::Token {
+                kind: _,
+                mut n_raw_tokens,
+            } => loop {
                 let token = tokens[idx];
                 if !is_insignificant(token.kind) {
                     n_raw_tokens -= 1;
@@ -76,8 +79,7 @@ fn from_events_to_file(
                     break;
                 }
             },
-            &Event::Error { ref message } =>
-                builder.error().message(message.clone()).emit(),
+            &Event::Error { ref message } => builder.error().message(message.clone()).emit(),
         }
     }
     builder.finish()
diff --git a/src/text.rs b/src/text.rs
index af0a4a9e7..ac1a54a75 100644
--- a/src/text.rs
+++ b/src/text.rs
@@ -64,7 +64,6 @@ impl ops::SubAssign<TextUnit> for TextUnit {
     }
 }
 
-
 #[derive(Clone, Copy, PartialEq, Eq)]
 pub struct TextRange {
     start: TextUnit,
@@ -83,7 +82,6 @@ impl fmt::Display for TextRange {
     }
 }
 
-
 impl TextRange {
     pub fn empty() -> TextRange {
         TextRange::from_to(TextUnit::new(0), TextUnit::new(0))
@@ -91,7 +89,10 @@ impl TextRange {
 
     pub fn from_to(from: TextUnit, to: TextUnit) -> TextRange {
         assert!(from <= to, "Invalid text range [{}; {})", from, to);
-        TextRange { start: from, end: to }
+        TextRange {
+            start: from,
+            end: to,
+        }
     }
 
     pub fn from_len(from: TextUnit, len: TextUnit) -> TextRange {
@@ -121,4 +122,4 @@ impl ops::Index<TextRange> for str {
     fn index(&self, index: TextRange) -> &str {
         &self[index.start().0 as usize..index.end().0 as usize]
     }
-}
\ No newline at end of file
+}
diff --git a/src/tree/file_builder.rs b/src/tree/file_builder.rs
index 35702ddd7..939922cb2 100644
--- a/src/tree/file_builder.rs
+++ b/src/tree/file_builder.rs
@@ -1,5 +1,5 @@
-use {SyntaxKind, TextUnit, TextRange};
-use super::{NodeData, SyntaxErrorData, NodeIdx, File};
+use {SyntaxKind, TextRange, TextUnit};
+use super::{File, NodeData, NodeIdx, SyntaxErrorData};
 
 pub trait Sink {
     fn leaf(&mut self, kind: SyntaxKind, len: TextUnit);
@@ -8,7 +8,6 @@ pub trait Sink {
     fn error(&mut self) -> ErrorBuilder;
 }
 
-
 pub struct FileBuilder {
     text: String,
     nodes: Vec<NodeData>,
@@ -48,9 +47,9 @@ impl Sink for FileBuilder {
     }
 
     fn finish_internal(&mut self) {
-        let (id, _) = self.in_progress.pop().expect(
-            "trying to complete a node, but there are no in-progress nodes"
-        );
+        let (id, _) = self.in_progress
+            .pop()
+            .expect("trying to complete a node, but there are no in-progress nodes");
         if !self.in_progress.is_empty() {
             self.add_len(id);
         }
@@ -76,11 +75,14 @@ impl FileBuilder {
         assert!(
             self.in_progress.is_empty(),
             "some nodes in FileBuilder are unfinished: {:?}",
-            self.in_progress.iter().map(|&(idx, _)| self.nodes[idx].kind)
+            self.in_progress
+                .iter()
+                .map(|&(idx, _)| self.nodes[idx].kind)
                 .collect::<Vec<_>>()
         );
         assert_eq!(
-            self.pos, (self.text.len() as u32).into(),
+            self.pos,
+            (self.text.len() as u32).into(),
             "nodes in FileBuilder do not cover the whole file"
         );
         File {
@@ -100,7 +102,6 @@ impl FileBuilder {
         child.parent = Some(self.current_id());
         let id = self.new_node(child);
         {
-
             let (parent, sibling) = *self.in_progress.last().unwrap();
             let slot = if let Some(idx) = sibling {
                 &mut self.nodes[idx].next_sibling
@@ -140,12 +141,15 @@ fn grow(left: &mut TextRange, right: TextRange) {
 
 pub struct ErrorBuilder<'f> {
     message: Option<String>,
-    builder: &'f mut FileBuilder
+    builder: &'f mut FileBuilder,
 }
 
 impl<'f> ErrorBuilder<'f> {
     fn new(builder: &'f mut FileBuilder) -> Self {
-        ErrorBuilder { message: None, builder }
+        ErrorBuilder {
+            message: None,
+            builder,
+        }
     }
 
     pub fn message<M: Into<String>>(mut self, m: M) -> Self {
@@ -156,6 +160,10 @@ impl<'f> ErrorBuilder<'f> {
     pub fn emit(self) {
         let message = self.message.expect("Error message not set");
         let &(node, after_child) = self.builder.in_progress.last().unwrap();
-        self.builder.errors.push(SyntaxErrorData { node, message, after_child })
+        self.builder.errors.push(SyntaxErrorData {
+            node,
+            message,
+            after_child,
+        })
     }
 }
diff --git a/src/tree/mod.rs b/src/tree/mod.rs
index 3315b926e..a330caf54 100644
--- a/src/tree/mod.rs
+++ b/src/tree/mod.rs
@@ -1,4 +1,4 @@
-use text::{TextUnit, TextRange};
+use text::{TextRange, TextUnit};
 use syntax_kinds::syntax_info;
 
 use std::fmt;
@@ -11,15 +11,10 @@ pub use self::file_builder::{FileBuilder, Sink};
 pub struct SyntaxKind(pub(crate) u32);
 
 pub(crate) const EOF: SyntaxKind = SyntaxKind(!0);
-pub(crate) const EOF_INFO: SyntaxInfo = SyntaxInfo {
-    name: "EOF"
-};
+pub(crate) const EOF_INFO: SyntaxInfo = SyntaxInfo { name: "EOF" };
 
 pub(crate) const TOMBSTONE: SyntaxKind = SyntaxKind(!0 - 1);
-pub(crate) const TOMBSTONE_INFO: SyntaxInfo = SyntaxInfo {
-    name: "TOMBSTONE"
-};
-
+pub(crate) const TOMBSTONE_INFO: SyntaxInfo = SyntaxInfo { name: "TOMBSTONE" };
 
 impl SyntaxKind {
     fn info(self) -> &'static SyntaxInfo {
@@ -38,7 +33,6 @@ impl fmt::Debug for SyntaxKind {
     }
 }
 
-
 pub(crate) struct SyntaxInfo {
     pub name: &'static str,
 }
@@ -58,7 +52,10 @@ pub struct File {
 impl File {
     pub fn root<'f>(&'f self) -> Node<'f> {
         assert!(!self.nodes.is_empty());
-        Node { file: self, idx: NodeIdx(0) }
+        Node {
+            file: self,
+            idx: NodeIdx(0),
+        }
     }
 }
 
@@ -86,14 +83,17 @@ impl<'f> Node<'f> {
     }
 
     pub fn children(&self) -> Children<'f> {
-        Children { next: self.as_node(self.data().first_child) }
+        Children {
+            next: self.as_node(self.data().first_child),
+        }
     }
 
     pub fn errors(&self) -> SyntaxErrors<'f> {
         let pos = self.file.errors.iter().position(|e| e.node == self.idx);
-        let next = pos
-            .map(|i| ErrorIdx(i as u32))
-            .map(|idx| SyntaxError { file: self.file, idx });
+        let next = pos.map(|i| ErrorIdx(i as u32)).map(|idx| SyntaxError {
+            file: self.file,
+            idx,
+        });
         SyntaxErrors { next }
     }
 
@@ -102,7 +102,10 @@ impl<'f> Node<'f> {
     }
 
     fn as_node(&self, idx: Option<NodeIdx>) -> Option<Node<'f>> {
-        idx.map(|idx| Node { file: self.file, idx })
+        idx.map(|idx| Node {
+            file: self.file,
+            idx,
+        })
     }
 }
 
@@ -118,8 +121,7 @@ impl<'f> cmp::PartialEq<Node<'f>> for Node<'f> {
     }
 }
 
-impl<'f> cmp::Eq for Node<'f> {
-}
+impl<'f> cmp::Eq for Node<'f> {}
 
 #[derive(Clone, Copy)]
 pub struct SyntaxError<'f> {
@@ -134,7 +136,10 @@ impl<'f> SyntaxError<'f> {
 
     pub fn after_child(&self) -> Option<Node<'f>> {
         let idx = self.data().after_child?;
-        Some(Node { file: self.file, idx })
+        Some(Node {
+            file: self.file,
+            idx,
+        })
     }
 
     fn data(&self) -> &'f SyntaxErrorData {
@@ -148,7 +153,7 @@ impl<'f> SyntaxError<'f> {
         }
         let result = SyntaxError {
             file: self.file,
-            idx: ErrorIdx(next_idx)
+            idx: ErrorIdx(next_idx),
         };
         if result.data().node != self.data().node {
             return None;
@@ -185,7 +190,6 @@ impl<'f> Iterator for SyntaxErrors<'f> {
     }
 }
 
-
 #[derive(Clone, Copy, PartialEq, Eq)]
 struct NodeIdx(u32);
 
diff --git a/tests/lexer.rs b/tests/lexer.rs
index 20840f456..397ebafdd 100644
--- a/tests/lexer.rs
+++ b/tests/lexer.rs
@@ -4,18 +4,15 @@ extern crate testutils;
 
 use std::fmt::Write;
 
-use libsyntax2::{Token, tokenize};
+use libsyntax2::{tokenize, Token};
 use testutils::dir_tests;
 
 #[test]
 fn lexer_tests() {
-    dir_tests(
-        &["lexer"],
-        |text| {
-            let tokens = tokenize(text);
-            dump_tokens(&tokens, text)
-        }
-    )
+    dir_tests(&["lexer"], |text| {
+        let tokens = tokenize(text);
+        dump_tokens(&tokens, text)
+    })
 }
 
 fn dump_tokens(tokens: &[Token], text: &str) -> String {
@@ -29,4 +26,4 @@ fn dump_tokens(tokens: &[Token], text: &str) -> String {
         write!(acc, "{:?} {} {:?}\n", token.kind, token.len, token_text).unwrap()
     }
     acc
-}
\ No newline at end of file
+}
diff --git a/tests/parser.rs b/tests/parser.rs
index 370b02c74..37c9021ef 100644
--- a/tests/parser.rs
+++ b/tests/parser.rs
@@ -2,18 +2,15 @@ extern crate file;
 extern crate libsyntax2;
 extern crate testutils;
 
-use libsyntax2::{tokenize, parse};
+use libsyntax2::{parse, tokenize};
 use libsyntax2::utils::dump_tree;
 use testutils::dir_tests;
 
 #[test]
 fn parser_tests() {
-    dir_tests(
-        &["parser/ok", "parser/err"],
-        |text| {
-            let tokens = tokenize(text);
-            let file = parse(text.to_string(), &tokens);
-            dump_tree(&file)
-        }
-    )
+    dir_tests(&["parser/ok", "parser/err"], |text| {
+        let tokens = tokenize(text);
+        let file = parse(text.to_string(), &tokens);
+        dump_tree(&file)
+    })
 }
-- 
cgit v1.2.3
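
A few notes on what the patch above sets up. The new rustfmt.toml is empty (index e69de29bb is git's hash of the zero-length blob), so rustfmt runs with its default style; the file's presence simply marks the crate as rustfmt-managed. Enforcement happens in CI: the pinned nightly-2018-01-26 Travis job installs the rustfmt-preview component and runs `cargo fmt -- --write-mode=diff`, a mode that prints formatting differences and reports failure when any file deviates, while the stable job keeps running `cargo test`. Generated code is exempted in src/lib.rs with `#[cfg_attr(rustfmt, rustfmt_skip)]` on `pub mod syntax_kinds` — the pre-2018 spelling of today's `#[rustfmt::skip]`. A minimal sketch of how that attribute behaves (the module name and table below are illustrative, not from this repository):

    // The `rustfmt` cfg is never set during a normal build, so cfg_attr strips
    // the attribute before compilation; rustfmt itself recognizes the marker
    // and leaves the item's formatting untouched.
    #[cfg_attr(rustfmt, rustfmt_skip)]
    mod generated {
        // A hand-aligned table that `cargo fmt` would otherwise reflow.
        pub static KINDS: [(&'static str, u32); 3] = [
            ("EOF",       0),
            ("TOMBSTONE", 1),
            ("IDENT",     2),
        ];
    }

    fn main() {
        for &(name, id) in generated::KINDS.iter() {
            println!("{} = {}", name, id);
        }
    }

The cfg_attr indirection matters because `rustfmt_skip` was never a registered compiler attribute: guarding it behind the (never-enabled) `rustfmt` cfg keeps stable compilers from rejecting it.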