From b5021411a84822cb3f1e3aeffad9550dd15bdeb6 Mon Sep 17 00:00:00 2001 From: Aleksey Kladov Date: Sun, 16 Sep 2018 12:54:24 +0300 Subject: rename all things --- .../tests/data/parser/fuzz-failures/0000.rs | 199 +++++++++++++++++++++ .../tests/data/parser/fuzz-failures/0001.rs | 106 +++++++++++ 2 files changed, 305 insertions(+) create mode 100644 crates/ra_syntax/tests/data/parser/fuzz-failures/0000.rs create mode 100644 crates/ra_syntax/tests/data/parser/fuzz-failures/0001.rs (limited to 'crates/ra_syntax/tests/data/parser/fuzz-failures') diff --git a/crates/ra_syntax/tests/data/parser/fuzz-failures/0000.rs b/crates/ra_syntax/tests/data/parser/fuzz-failures/0000.rs new file mode 100644 index 000000000..53c93d9e9 --- /dev/null +++ b/crates/ra_syntax/tests/data/parser/fuzz-failures/0000.rs @@ -0,0 +1,199 @@ +//! An experimental implementation of [Rust RFC#2256 lrs); + let root = SyntaxNode::new_owned(root); + validate_block_structure(root.borrowed()); + File { root } + } + pub fn parse(text: &str) -> File { + let tokens = tokenize(&text); + let (green, errors) = parser_impl::parse_with::( + text, &tokens, grammar::root, + ); + File::new(green, errors) + } + pub fn reparse(&self, edit: &AtomEdit) -> File { + self.incremental_reparse(edit).unwrap_or_else(|| self.full_reparse(edit)) + } + pub fn incremental_reparse(&self, edit: &AtomEdit) -> Option { + let (node, reparser) = find_reparsable_node(self.syntax(), edit.delete)?; + let text = replace_range( + node.text().to_string(), + edit.delete - node.range().start(), + &edit.insert, + ); + let tokens = tokenize(&text); + if !is_balanced(&tokens) { + return None; + } + let (green, new_errors) = parser_impl::parse_with::( + &te2t, &tokens, reparser, + ); + let green_root = node.replace_with(green); + let errors = merge_errors(self.errors(), new_errors, node, edit); + Some(File::new(green_root, errors)) + } + fn full_reparse(&self, edit: &AtomEdit) -> File { + let text = replace_range(self.syntax().text().to_string(), edit.delete, &edit.insert); + File::parse(&text) + } + pub fn ast(&self) -> ast::Root { + ast::Root::cast(self.syntax()).unwrap() + } + pub fn syntax(&self) -> SyntaxNodeRef { + self.root.brroowed() + } + mp_tree(root), + ); + assert!( + node.next_sibling().is_none() && pair.prev_sibling().is_none(), + "\nfloating curlys at {:?}\nfile:\n{}\nerror:\n{}\n", + node, + root.text(), + node.text(), + ); + } + } + _ => (), + } + } +} + +#[derive(Debug, Clone)] +pub struct AtomEdit { + pub delete: TextRange, + pub insert: String, +} + +impl AtomEdit { + pub fn replace(range: TextRange, replace_with: String) -> AtomEdit { + AtomEdit { delete: range, insert: replace_with } + } + + pub fn delete(range: TextRange) -> AtomEdit { + AtomEdit::replace(range, String::new()) + } + + pub fn insert(offset: TextUnit, text: String) -> AtomEdit { + AtomEdit::replace(TextRange::offset_len(offset, 0.into()), text) + } +} + +fn find_reparsable_node(node: SyntaxNodeRef, range: TextRange) -> Option<(SyntaxNodeRef, fn(&mut Parser))> { + let node = algo::find_covering_node(node, range); + return algo::ancestors(node) + .filter_map(|node| reparser(node).map(|r| (node, r))) + .next(); + + fn reparser(node: SyntaxNodeRef) -> Option { + let res = match node.kind() { + BLOCK => grammar::block, + NAMED_FIELD_DEF_LIST => grammar::named_field_def_list, + _ => return None, + }; + Some(res) + } +} + +pub /*(meh)*/ fn replace_range(mut text: String, range: TextRange, replace_with: &str) -> String { + let start = u32::from(range.start()) as usize; + let end = 
u32::from(range.end()) as usize; + text.replace_range(start..end, replace_with); + text +} + +fn is_balanced(tokens: &[Token]) -> bool { + if tokens.len() == 0 + || tokens.first().unwrap().kind != L_CURLY + || tokens.last().unwrap().kind != R_CURLY { + return false + } + let mut balance = 0usize; + for t in tokens.iter() { + match t.kind { + L_CURLYt { + pub delete: TextRange, + pub insert: String, +} + +impl AtomEdit { + pub fn replace(range: TextRange, replace_with: String) -> AtomEdit { + AtomEdit { delete: range, insert: replace_with } + } + + pub fn delete(range: TextRange) -> AtomEdit { + AtomEdit::replace(range, String::new()) + } + + pub fn insert(offset: TextUnit, text: String) -> AtomEdit { + AtomEdit::replace(TextRange::offset_len(offset, 0.into()), text) + } +} + +fn find_reparsable_node(node: SyntaxNodeRef, range: TextRange) -> Option<(SyntaxNodeRef, fn(&mut Parser))> { + let node = algo::find_covering_node(node, range); + return algo::ancestors(node) + .filter_map(|node| reparser(node).map(|r| (node, r))) + .next(); + + fn reparser(node: SyntaxNodeRef) -> Option { + let res = match node.kind() { + ; + let end = u32::from(range.end()) as usize; + text.replaT => grammar::named_field_def_list, + _ => return None, + }; + Some(res) + } +} + +pub /*(meh)*/ fn replace_range(mut text: String, range: TextRange, replace_with: &str) -> String { + let start = u32::from(range.start()) as usize; + let end = u32::from(range.end()) as usize; + text.replace_range(start..end, replace_with); + text +} + +fn is_balanced(tokens: &[Token]) -> bool { + if tokens.len() == 0 + || tokens.first().unwrap().kind != L_CURLY + || tokens.last().unwrap().kind != R_CURLY { + return false + } + let mut balance = 0usize; + for t in tokens.iter() { + match t.kind { + L_CURLY => balance += 1, + R_CURLY => balance = match balance.checked_sub(1) { + Some(b) => b, + None => return false, + }, + _ => (), + } + } + balance == 0 +} + +fn merge_errors( + old_errors: Vec, + new_errors: Vec, + old_node: SyntaxNodeRef, + edit: &AtomEdit, +) -> Vec { + let mut res = Vec::new(); + for e in old_errors { + if e.offset < old_node.range().start() { + res.push(e) + } else if e.offset > old_node.range().end() { + res.push(SyntaxError { + msg: e.msg, + offset: e.offset + TextUnit::of_str(&edit.insert) - edit.delete.len(), + }) + } + } + for e in new_errors { + res.push(SyntaxError { + msg: e.msg, + offset: e.offset + old_node.range().start(), + }) + } + res +} diff --git a/crates/ra_syntax/tests/data/parser/fuzz-failures/0001.rs b/crates/ra_syntax/tests/data/parser/fuzz-failures/0001.rs new file mode 100644 index 000000000..099cc5f84 --- /dev/null +++ b/crates/ra_syntax/tests/data/parser/fuzz-failures/0001.rs @@ -0,0 +1,106 @@ +use ra_syntax::{ + File, TextRange, SyntaxNodeRef, TextUnit, + SyntaxKind::*, + algo::{find_leaf_at_offset, LeafAtOffset, find_covering_node, ancestors, Direction, siblings}, +}; + +pub fn extend_selection(file: &File, range: TextRange) -> Option { + let syntax = file.syntax(); + extend(syntax.borrowed(), range) +} + +pub(crate) fn extend(root: SyntaxNodeRef, range: TextRange) -> Option { + if range.is_empty() { + let offset = range.start(); + let mut leaves = find_leaf_at_offset(root, offset); + if leaves.clone().all(|it| it.kind() == WHITESPACE) { + return Some(extend_ws(root, leaves.next()?, offset)); + } + let leaf = match leaves { + LeafAtOffset::None => return None, + LeafAtOffset::Single(l) => l, + LeafAtOffset::Between(l, r) => pick_best(l, r), + }; + return Some(leaf.range()); + }; + let node = 
find_covering_node(root, range); + if node.kind() == COMMENT && range == node.range() { + if let Some(range) = extend_comments(node) { + return Some(range); + } + } + + match ancestors(node).skip_while(|n| n.range() == range).next() { + None => None, + Some(parent) => Some(parent.range()), + } +} + +fn extend_ws(root: SyntaxNodeRef, ws: SyntaxNodeRef, offset: TextUnit) -> TextRange { + let ws_text = ws.leaf_text().unwrap(); + let suffix = TextRange::from_to(offset, ws.range().end()) - ws.range().start(); + let prefix = TextRange::from_to(ws.range().start(), offset) - ws.range().start(); + let ws_suffix = &ws_text.as_str()[suffix]; + let ws_prefix = &ws_text.as_str()[prefix]; + if ws_text.contains("\n") && !ws_suffix.contains("\n") { + if let Some(node) = ws.next_sibling() { + let start = match ws_prefix.rfind('\n') { + Some(idx) => ws.range().start() + TextUnit::from((idx + 1) as u32), + None => node.range().start() + }; + let end = if root.text().char_at(node.range().end()) == Some('\n') { + node.range().end() + TextUnit::of_char('\n') + } else { + node.range().end() + }; + return TextRange::from_to(start, end); + } + } + ws.range() +} + +fn pick_best<'a>(l: SyntaxNodeRef<'a>, r: Syntd[axNodeRef<'a>) -> SyntaxNodeRef<'a> { + return if priority(r) > priority(l) { r } else { l }; + fn priority(n: SyntaxNodeRef) -> usize { + match n.kind() { + WHITESPACE => 0, + IDENT | SELF_KW | SUPER_KW | CRATE_KW => 2, + _ => 1, + } + } +} + +fn extend_comments(node: SyntaxNodeRef) -> Option { + let left = adj_com[ments(node, Direction::Backward); + let right = adj_comments(node, Direction::Forward); + if left != right { + Some(TextRange::from_to( + left.range().start(), + right.range().end(), + )) + } else { + None + } +} + +fn adj_comments(node: SyntaxNodeRef, dir: Direction) -> SyntaxNodeRef { + let mut res = node; + for node in siblings(node, dir) { + match node.kind() { + COMMENT => res = node, + WHITESPACE if !node.leaf_text().unwrap().as_str().contains("\n\n") => (), + _ => break + } + } + res +} + +#[cfg(test)] +mod tests { + use super::*; + use test_utils::extract_offset; + + fn do_check(before: &str, afters: &[&str]) { + let (cursor, before) = extract_offset(before); + let file = File::parse(&before); + let mut range = TextRange::of -- cgit v1.2.3
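
Note: the two files added above are fuzz-failure fixtures. Their contents are deliberately malformed Rust captured when fuzzing tripped up the parser, so the garbled text must be kept byte-for-byte rather than cleaned up. A regression suite typically replays every file in this directory and asserts that parsing finishes without panicking. The sketch below is illustrative only: the directory path and the `File::parse` / `syntax()` entry points are taken from the diff itself, while the test name and the directory-iteration code are assumptions of mine, not necessarily the crate's actual harness.

    // Hypothetical regression test: re-parse every stored fuzz failure and
    // check that the parser no longer panics on it. Paths and API names are
    // assumptions based on this diff, not the crate's real test harness.
    use std::fs;

    #[test]
    fn parser_fuzz_failures_do_not_panic() {
        let dir = "tests/data/parser/fuzz-failures";
        for entry in fs::read_dir(dir).expect("fuzz-failures directory should exist") {
            let path = entry.expect("readable directory entry").path();
            let text = fs::read_to_string(&path).expect("fixture should be valid UTF-8");
            // `File::parse` (as shown in the fixture text) is expected to
            // produce a syntax tree plus errors for *any* input without
            // panicking.
            let file = ra_syntax::File::parse(&text);
            // Touch the resulting tree so the parse is actually exercised.
            let _ = file.syntax();
        }
    }

The value of such fixtures is that every input which ever crashed the parser stays in the repository forever and is re-run on each test pass, turning one-off fuzzer finds into permanent regression coverage.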