Diffstat (limited to 'xtask')
-rw-r--r--   xtask/src/ast_src.rs                  |   2
-rw-r--r--   xtask/src/codegen.rs                  |  89
-rw-r--r--   xtask/src/codegen/gen_assists_docs.rs | 205
-rw-r--r--   xtask/src/codegen/gen_feature_docs.rs |  75
-rw-r--r--   xtask/src/lib.rs                      |   6
-rw-r--r--   xtask/src/main.rs                     |   1
-rw-r--r--   xtask/tests/tidy.rs                   |  14
7 files changed, 268 insertions, 124 deletions
diff --git a/xtask/src/ast_src.rs b/xtask/src/ast_src.rs
index d4621930e..f60f0fb16 100644
--- a/xtask/src/ast_src.rs
+++ b/xtask/src/ast_src.rs
@@ -1058,7 +1058,7 @@ pub(crate) const AST_SRC: AstSrc = AstSrc {
     /// [Reference](https://doc.rust-lang.org/reference/expressions/block-expr.html)
     /// [Labels for blocks RFC](https://github.com/rust-lang/rfcs/blob/master/text/2046-label-break-value.md)
     struct BlockExpr: AttrsOwner, ModuleItemOwner {
-        T!['{'], statements: [Stmt], Expr, T!['}'],
+        Label, T!['{'], statements: [Stmt], Expr, T!['}'],
     }

     /// Return expression.
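Note: adding `Label` to the `BlockExpr` node means blocks with a label (the label-break-value syntax from RFC 2046, linked above) become representable in the AST, and the generated `ast::BlockExpr` should gain a corresponding accessor. A minimal sketch of the surface syntax this covers, illustrative only and assuming a toolchain where label-break-value is available:

```rust
// A block expression carrying a label; `break 'done value` exits the block early
// with that value.
fn sign(n: i32) -> &'static str {
    'done: {
        if n < 0 {
            break 'done "negative";
        }
        if n > 0 {
            break 'done "positive";
        }
        "zero"
    }
}
```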
diff --git a/xtask/src/codegen.rs b/xtask/src/codegen.rs
index b4907f4b2..5511c01d5 100644
--- a/xtask/src/codegen.rs
+++ b/xtask/src/codegen.rs
@@ -8,14 +8,18 @@
 mod gen_syntax;
 mod gen_parser_tests;
 mod gen_assists_docs;
+mod gen_feature_docs;
 
-use std::{mem, path::Path};
+use std::{
+    fmt, mem,
+    path::{Path, PathBuf},
+};
 
-use crate::{not_bash::fs2, Result};
+use crate::{not_bash::fs2, project_root, Result};
 
 pub use self::{
-    gen_assists_docs::generate_assists_docs, gen_parser_tests::generate_parser_tests,
-    gen_syntax::generate_syntax,
+    gen_assists_docs::generate_assists_docs, gen_feature_docs::generate_feature_docs,
+    gen_parser_tests::generate_parser_tests, gen_syntax::generate_syntax,
 };
 
 const GRAMMAR_DIR: &str = "crates/ra_parser/src/grammar";
@@ -28,7 +32,6 @@ const AST_TOKENS: &str = "crates/ra_syntax/src/ast/generated/tokens.rs";
 
 const ASSISTS_DIR: &str = "crates/ra_assists/src/handlers";
 const ASSISTS_TESTS: &str = "crates/ra_assists/src/tests/generated.rs";
-const ASSISTS_DOCS: &str = "docs/user/assists.md";
 
 #[derive(Debug, PartialEq, Eq, Clone, Copy)]
 pub enum Mode {
@@ -40,7 +43,7 @@ pub enum Mode {
 /// With verify = false,
 fn update(path: &Path, contents: &str, mode: Mode) -> Result<()> {
     match fs2::read_to_string(path) {
-        Ok(ref old_contents) if normalize(old_contents) == normalize(contents) => {
+        Ok(old_contents) if normalize(&old_contents) == normalize(contents) => {
             return Ok(());
         }
         _ => (),
@@ -58,35 +61,85 @@ fn update(path: &Path, contents: &str, mode: Mode) -> Result<()> {
 }
 
 fn extract_comment_blocks(text: &str) -> Vec<Vec<String>> {
-    do_extract_comment_blocks(text, false)
+    do_extract_comment_blocks(text, false).into_iter().map(|(_line, block)| block).collect()
+}
+
+fn extract_comment_blocks_with_empty_lines(tag: &str, text: &str) -> Vec<CommentBlock> {
+    assert!(tag.starts_with(char::is_uppercase));
+    let tag = format!("{}:", tag);
+    let mut res = Vec::new();
+    for (line, mut block) in do_extract_comment_blocks(text, true) {
+        let first = block.remove(0);
+        if first.starts_with(&tag) {
+            let id = first[tag.len()..].trim().to_string();
+            let block = CommentBlock { id, line, contents: block };
+            res.push(block);
+        }
+    }
+    res
 }
 
-fn extract_comment_blocks_with_empty_lines(text: &str) -> Vec<Vec<String>> {
-    do_extract_comment_blocks(text, true)
+struct CommentBlock {
+    id: String,
+    line: usize,
+    contents: Vec<String>,
 }
 
-fn do_extract_comment_blocks(text: &str, allow_blocks_with_empty_lines: bool) -> Vec<Vec<String>> {
+fn do_extract_comment_blocks(
+    text: &str,
+    allow_blocks_with_empty_lines: bool,
+) -> Vec<(usize, Vec<String>)> {
     let mut res = Vec::new();
 
     let prefix = "// ";
     let lines = text.lines().map(str::trim_start);
 
-    let mut block = vec![];
-    for line in lines {
+    let mut block = (0, vec![]);
+    for (line_num, line) in lines.enumerate() {
         if line == "//" && allow_blocks_with_empty_lines {
-            block.push(String::new());
+            block.1.push(String::new());
             continue;
         }
 
         let is_comment = line.starts_with(prefix);
         if is_comment {
-            block.push(line[prefix.len()..].to_string());
-        } else if !block.is_empty() {
-            res.push(mem::replace(&mut block, Vec::new()));
+            block.1.push(line[prefix.len()..].to_string());
+        } else {
+            if !block.1.is_empty() {
+                res.push(mem::take(&mut block));
+            }
+            block.0 = line_num + 2;
         }
     }
-    if !block.is_empty() {
-        res.push(mem::replace(&mut block, Vec::new()))
+    if !block.1.is_empty() {
+        res.push(block)
     }
     res
 }
+
+#[derive(Debug)]
+struct Location {
+    file: PathBuf,
+    line: usize,
+}
+
+impl Location {
+    fn new(file: PathBuf, line: usize) -> Self {
+        Self { file, line }
+    }
+}
+
+impl fmt::Display for Location {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let path = self.file.strip_prefix(&project_root()).unwrap().display().to_string();
+        let path = path.replace('\\', "/");
+        let name = self.file.file_name().unwrap();
+        write!(
+            f,
+            "https://github.com/rust-analyzer/rust-analyzer/blob/master/{}#L{}[{}]",
+            path,
+            self.line,
+            name.to_str().unwrap()
+        )
+    }
+}
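The new helpers key comment blocks by a tag (`Assist:`, `Feature:`) and remember the line the block starts on, which `Location` turns into a GitHub source link. A minimal sketch of the behaviour, written as a hypothetical test inside xtask/src/codegen.rs where the private helper and `CommentBlock` fields are in scope (the feature name and wording are illustrative):

```rust
#[test]
fn tagged_blocks_are_extracted() {
    let text = "\
fn irrelevant() {}

// Feature: Join Lines
//
// Joins selected lines into one.
fn join_lines() {}
";
    let blocks = extract_comment_blocks_with_empty_lines("Feature", text);
    assert_eq!(blocks.len(), 1);
    // The id is whatever follows the `Feature:` tag on the first comment line.
    assert_eq!(blocks[0].id, "Join Lines");
    // `line` is the 1-based line where the block starts (line 3 here); it feeds the
    // `#L...` fragment in the generated GitHub link.
    assert_eq!(blocks[0].line, 3);
    // The remaining comment lines (blank `//` lines kept as empty strings) become `contents`.
    assert_eq!(
        blocks[0].contents,
        vec!["".to_string(), "Joins selected lines into one.".to_string()]
    );
}
```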
diff --git a/xtask/src/codegen/gen_assists_docs.rs b/xtask/src/codegen/gen_assists_docs.rs
index 4bd6b5f0c..6c1be5350 100644
--- a/xtask/src/codegen/gen_assists_docs.rs
+++ b/xtask/src/codegen/gen_assists_docs.rs
@@ -1,102 +1,112 @@
 //! Generates `assists.md` documentation.
 
-use std::{fs, path::Path};
+use std::{fmt, fs, path::Path};
 
 use crate::{
-    codegen::{self, extract_comment_blocks_with_empty_lines, Mode},
+    codegen::{self, extract_comment_blocks_with_empty_lines, Location, Mode},
     project_root, rust_files, Result,
 };
 
 pub fn generate_assists_docs(mode: Mode) -> Result<()> {
-    let assists = collect_assists()?;
+    let assists = Assist::collect()?;
     generate_tests(&assists, mode)?;
-    generate_docs(&assists, mode)?;
+
+    let contents = assists.into_iter().map(|it| it.to_string()).collect::<Vec<_>>().join("\n\n");
+    let contents = contents.trim().to_string() + "\n";
+    let dst = project_root().join("docs/user/generated_assists.adoc");
+    codegen::update(&dst, &contents, mode)?;
+
     Ok(())
 }
 
 #[derive(Debug)]
 struct Assist {
     id: String,
+    location: Location,
     doc: String,
     before: String,
     after: String,
 }
 
-fn hide_hash_comments(text: &str) -> String {
-    text.split('\n') // want final newline
-        .filter(|&it| !(it.starts_with("# ") || it == "#"))
-        .map(|it| format!("{}\n", it))
-        .collect()
-}
-
-fn reveal_hash_comments(text: &str) -> String {
-    text.split('\n') // want final newline
-        .map(|it| {
-            if it.starts_with("# ") {
-                &it[2..]
-            } else if it == "#" {
-                ""
-            } else {
-                it
-            }
-        })
-        .map(|it| format!("{}\n", it))
-        .collect()
-}
-
-fn collect_assists() -> Result<Vec<Assist>> {
-    let mut res = Vec::new();
-    for path in rust_files(&project_root().join(codegen::ASSISTS_DIR)) {
-        collect_file(&mut res, path.as_path())?;
-    }
-    res.sort_by(|lhs, rhs| lhs.id.cmp(&rhs.id));
-    return Ok(res);
-
-    fn collect_file(acc: &mut Vec<Assist>, path: &Path) -> Result<()> {
-        let text = fs::read_to_string(path)?;
-        let comment_blocks = extract_comment_blocks_with_empty_lines(&text);
-
-        for block in comment_blocks {
-            // FIXME: doesn't support blank lines yet, need to tweak
-            // `extract_comment_blocks` for that.
-            let mut lines = block.iter();
-            let first_line = lines.next().unwrap();
-            if !first_line.starts_with("Assist: ") {
-                continue;
-            }
-            let id = first_line["Assist: ".len()..].to_string();
-            assert!(
-                id.chars().all(|it| it.is_ascii_lowercase() || it == '_'),
-                "invalid assist id: {:?}",
-                id
-            );
-
-            let doc = take_until(lines.by_ref(), "```").trim().to_string();
-            assert!(
-                doc.chars().next().unwrap().is_ascii_uppercase() && doc.ends_with('.'),
-                "\n\n{}: assist docs should be proper sentences, with capitalization and a full stop at the end.\n\n{}\n\n",
-                id, doc,
-            );
-
-            let before = take_until(lines.by_ref(), "```");
-
-            assert_eq!(lines.next().unwrap().as_str(), "->");
-            assert_eq!(lines.next().unwrap().as_str(), "```");
-            let after = take_until(lines.by_ref(), "```");
-            acc.push(Assist { id, doc, before, after })
+impl Assist {
+    fn collect() -> Result<Vec<Assist>> {
+        let mut res = Vec::new();
+        for path in rust_files(&project_root().join(codegen::ASSISTS_DIR)) {
+            collect_file(&mut res, path.as_path())?;
         }
+        res.sort_by(|lhs, rhs| lhs.id.cmp(&rhs.id));
+        return Ok(res);
+
+        fn collect_file(acc: &mut Vec<Assist>, path: &Path) -> Result<()> {
+            let text = fs::read_to_string(path)?;
+            let comment_blocks = extract_comment_blocks_with_empty_lines("Assist", &text);
+
+            for block in comment_blocks {
+                // FIXME: doesn't support blank lines yet, need to tweak
+                // `extract_comment_blocks` for that.
+                let id = block.id;
+                assert!(
+                    id.chars().all(|it| it.is_ascii_lowercase() || it == '_'),
+                    "invalid assist id: {:?}",
+                    id
+                );
+                let mut lines = block.contents.iter();
+
+                let doc = take_until(lines.by_ref(), "```").trim().to_string();
+                assert!(
+                    doc.chars().next().unwrap().is_ascii_uppercase() && doc.ends_with('.'),
+                    "\n\n{}: assist docs should be proper sentences, with capitalization and a full stop at the end.\n\n{}\n\n",
+                    id, doc,
+                );
+
+                let before = take_until(lines.by_ref(), "```");
+
+                assert_eq!(lines.next().unwrap().as_str(), "->");
+                assert_eq!(lines.next().unwrap().as_str(), "```");
+                let after = take_until(lines.by_ref(), "```");
+                let location = Location::new(path.to_path_buf(), block.line);
+                acc.push(Assist { id, location, doc, before, after })
+            }
 
             fn take_until<'a>(lines: impl Iterator<Item = &'a String>, marker: &str) -> String {
                 let mut buf = Vec::new();
                 for line in lines {
                     if line == marker {
                         break;
+                    }
+                    buf.push(line.clone());
                 }
-                buf.push(line.clone());
+                buf.join("\n")
             }
-            buf.join("\n")
+            Ok(())
         }
-        Ok(())
+    }
+}
+
+impl fmt::Display for Assist {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let before = self.before.replace("<|>", "┃"); // Unicode pseudo-graphics bar
+        let after = self.after.replace("<|>", "┃");
+        writeln!(
+            f,
+            "[discrete]\n=== `{}`
+**Source:** {}
+
+{}
+
+.Before
+```rust
+{}```
+
+.After
+```rust
+{}```",
+            self.id,
+            self.location,
+            self.doc,
+            hide_hash_comments(&before),
+            hide_hash_comments(&after)
+        )
     }
 }
 
@@ -127,33 +137,24 @@ r#####"
     codegen::update(&project_root().join(codegen::ASSISTS_TESTS), &buf, mode)
 }
 
-fn generate_docs(assists: &[Assist], mode: Mode) -> Result<()> {
-    let mut buf = String::from(
-        "# Assists\n\nCursor position or selection is signified by `┃` character.\n\n",
-    );
-
-    for assist in assists {
-        let before = assist.before.replace("<|>", "┃"); // Unicode pseudo-graphics bar
-        let after = assist.after.replace("<|>", "┃");
-        let docs = format!(
-            "
-## `{}`
-
-{}
-
-```rust
-// BEFORE
-{}
-// AFTER
-{}```
-",
-            assist.id,
-            assist.doc,
-            hide_hash_comments(&before),
-            hide_hash_comments(&after)
-        );
-        buf.push_str(&docs);
-    }
+fn hide_hash_comments(text: &str) -> String {
+    text.split('\n') // want final newline
+        .filter(|&it| !(it.starts_with("# ") || it == "#"))
+        .map(|it| format!("{}\n", it))
+        .collect()
+}
 
-    codegen::update(&project_root().join(codegen::ASSISTS_DOCS), &buf, mode)
+fn reveal_hash_comments(text: &str) -> String {
+    text.split('\n') // want final newline
+        .map(|it| {
+            if it.starts_with("# ") {
+                &it[2..]
+            } else if it == "#" {
+                ""
+            } else {
+                it
+            }
+        })
+        .map(|it| format!("{}\n", it))
+        .collect()
 }
diff --git a/xtask/src/codegen/gen_feature_docs.rs b/xtask/src/codegen/gen_feature_docs.rs
new file mode 100644
index 000000000..31bc3839d
--- /dev/null
+++ b/xtask/src/codegen/gen_feature_docs.rs
@@ -0,0 +1,75 @@
+//! Generates `assists.md` documentation.
+
+use std::{fmt, fs, path::PathBuf};
+
+use crate::{
+    codegen::{self, extract_comment_blocks_with_empty_lines, Location, Mode},
+    project_root, rust_files, Result,
+};
+
+pub fn generate_feature_docs(mode: Mode) -> Result<()> {
+    let features = Feature::collect()?;
+    let contents = features.into_iter().map(|it| it.to_string()).collect::<Vec<_>>().join("\n\n");
+    let contents = contents.trim().to_string() + "\n";
+    let dst = project_root().join("docs/user/generated_features.adoc");
+    codegen::update(&dst, &contents, mode)?;
+    Ok(())
+}
+
+#[derive(Debug)]
+struct Feature {
+    id: String,
+    location: Location,
+    doc: String,
+}
+
+impl Feature {
+    fn collect() -> Result<Vec<Feature>> {
+        let mut res = Vec::new();
+        for path in rust_files(&project_root()) {
+            collect_file(&mut res, path)?;
+        }
+        res.sort_by(|lhs, rhs| lhs.id.cmp(&rhs.id));
+        return Ok(res);
+
+        fn collect_file(acc: &mut Vec<Feature>, path: PathBuf) -> Result<()> {
+            let text = fs::read_to_string(&path)?;
+            let comment_blocks = extract_comment_blocks_with_empty_lines("Feature", &text);
+
+            for block in comment_blocks {
+                let id = block.id;
+                assert!(is_valid_feature_name(&id), "invalid feature name: {:?}", id);
+                let doc = block.contents.join("\n");
+                let location = Location::new(path.clone(), block.line);
+                acc.push(Feature { id, location, doc })
+            }
+
+            Ok(())
+        }
+    }
+}
+
+fn is_valid_feature_name(feature: &str) -> bool {
+    'word: for word in feature.split_whitespace() {
+        for &short in ["to", "and"].iter() {
+            if word == short {
+                continue 'word;
+            }
+        }
+        for &short in ["To", "And"].iter() {
+            if word == short {
+                return false;
+            }
+        }
+        if !word.starts_with(char::is_uppercase) {
+            return false;
+        }
+    }
+    true
+}
+
+impl fmt::Display for Feature {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(f, "=== {}\n**Source:** {}\n{}", self.id, self.location, self.doc)
+    }
+}
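A sketch of the input this new generator consumes: any `// Feature:` block anywhere in the project is collected, its name must pass `is_valid_feature_name` (title-cased words, with lowercase `to`/`and` allowed), and the block body is emitted as asciidoc below a `===` heading and a **Source:** link. The feature name and wording below are illustrative only:

```rust
// Feature: Expand Macro Recursively
//
// Shows the full macro expansion of the macro at the current caret position.
//
// |===
// | Editor  | Action Name
//
// | VS Code | **Rust Analyzer: Expand macro recursively**
// |===
fn expand_macro() { /* feature implementation lives next to its docs */ }
```

`generate_feature_docs` concatenates the rendered sections into docs/user/generated_features.adoc, which the release step below copies to the website alongside generated_assists.adoc.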
diff --git a/xtask/src/lib.rs b/xtask/src/lib.rs
index 2b7a461e5..874957885 100644
--- a/xtask/src/lib.rs
+++ b/xtask/src/lib.rs
@@ -191,7 +191,11 @@ Release: release:{}[]
     let path = changelog_dir.join(format!("{}-changelog-{}.adoc", today, changelog_n));
     fs2::write(&path, &contents)?;
 
-    fs2::copy(project_root().join("./docs/user/readme.adoc"), website_root.join("manual.adoc"))?;
+    for &adoc in ["manual.adoc", "generated_features.adoc", "generated_assists.adoc"].iter() {
+        let src = project_root().join("./docs/user/").join(adoc);
+        let dst = website_root.join(adoc);
+        fs2::copy(src, dst)?;
+    }
 
     let tags = run!("git tag --list"; echo = false)?;
     let prev_tag = tags.lines().filter(|line| is_release_tag(line)).last().unwrap();
diff --git a/xtask/src/main.rs b/xtask/src/main.rs
index dff3ce4a1..9d7cdd114 100644
--- a/xtask/src/main.rs
+++ b/xtask/src/main.rs
@@ -75,6 +75,7 @@ FLAGS:
             codegen::generate_syntax(Mode::Overwrite)?;
             codegen::generate_parser_tests(Mode::Overwrite)?;
             codegen::generate_assists_docs(Mode::Overwrite)?;
+            codegen::generate_feature_docs(Mode::Overwrite)?;
             Ok(())
         }
         "format" => {
diff --git a/xtask/tests/tidy.rs b/xtask/tests/tidy.rs
index 2e9fcf07c..4ac5d929f 100644
--- a/xtask/tests/tidy.rs
+++ b/xtask/tests/tidy.rs
@@ -31,6 +31,13 @@ fn generated_assists_are_fresh() {
 }
 
 #[test]
+fn generated_features_are_fresh() {
+    if let Err(error) = codegen::generate_feature_docs(Mode::Verify) {
+        panic!("{}. Please update features by running `cargo xtask codegen`", error);
+    }
+}
+
+#[test]
 fn check_code_formatting() {
     if let Err(error) = run_rustfmt(Mode::Verify) {
         panic!("{}. Please format the code by running `cargo format`", error);
@@ -95,7 +102,7 @@ impl TidyDocs {
     fn visit(&mut self, path: &Path, text: &str) {
         // Test hopefully don't really need comments, and for assists we already
        // have special comments which are source of doc tests and user docs.
-        if is_exclude_dir(path, &["tests", "test_data", "handlers"]) {
+        if is_exclude_dir(path, &["tests", "test_data"]) {
            return;
        }
 
@@ -110,9 +117,12 @@
 
         if first_line.starts_with("//!") {
             if first_line.contains("FIXME") {
-                self.contains_fixme.push(path.to_path_buf())
+                self.contains_fixme.push(path.to_path_buf());
             }
         } else {
+            if text.contains("// Feature:") || text.contains("// Assist:") {
+                return;
+            }
             self.missing_docs.push(path.display().to_string());
         }
 
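Net effect of the tidy change: `handlers` directories are no longer blanket-exempted; instead, any file passes the missing-docs check if it either starts with a `//!` module doc comment or contains a tagged block that already feeds the generated user docs. A standalone restatement of that logic, as an assumed simplification of the updated `TidyDocs::visit`:

```rust
// A file needs `//!` docs unless it already carries a user-facing doc block.
fn needs_module_docs(text: &str) -> bool {
    let first_line = text.lines().next().unwrap_or_default();
    if first_line.starts_with("//!") {
        return false; // has module-level docs
    }
    // `// Feature:` / `// Assist:` blocks are turned into user docs by codegen,
    // so the tidy check accepts them in place of `//!`.
    !(text.contains("// Feature:") || text.contains("// Assist:"))
}

fn main() {
    assert!(!needs_module_docs("//! Docs for this module.\n"));
    assert!(!needs_module_docs("// Assist: flip_comma\n//\n// Flips a pair.\n"));
    assert!(needs_module_docs("fn undocumented() {}\n"));
}
```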