Diffstat (limited to 'crates/ra_flycheck/src/lib.rs')
-rw-r--r--  crates/ra_flycheck/src/lib.rs  374
1 file changed, 374 insertions, 0 deletions
diff --git a/crates/ra_flycheck/src/lib.rs b/crates/ra_flycheck/src/lib.rs
new file mode 100644
index 000000000..38940a77b
--- /dev/null
+++ b/crates/ra_flycheck/src/lib.rs
@@ -0,0 +1,374 @@
//! cargo_check provides the functionality needed to run `cargo check` or
//! another compatible command (e.g. clippy) in a background thread and provide
//! LSP diagnostics based on the output of the command.
use cargo_metadata::Message;
use crossbeam_channel::{never, select, unbounded, Receiver, RecvError, Sender};
use lsp_types::{
    CodeAction, CodeActionOrCommand, Diagnostic, Url, WorkDoneProgress, WorkDoneProgressBegin,
    WorkDoneProgressEnd, WorkDoneProgressReport,
};
use std::{
    error, fmt,
    io::{BufRead, BufReader},
    path::{Path, PathBuf},
    process::{Command, Stdio},
    time::Instant,
};

mod conv;

use crate::conv::{map_rust_diagnostic_to_lsp, MappedRustDiagnostic};

pub use crate::conv::url_from_path_with_drive_lowercasing;

#[derive(Clone, Debug)]
pub struct CheckConfig {
    pub enable: bool,
    pub args: Vec<String>,
    pub command: String,
    pub all_targets: bool,
}

/// CheckWatcher wraps the shared state and communication machinery used for
/// running `cargo check` (or another compatible command) and providing
/// diagnostics based on the output.
/// The spawned thread is shut down when this struct is dropped.
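///
/// A rough usage sketch, assuming the caller has a `CheckConfig` and the workspace
/// root at hand (the concrete values below are illustrative, not prescribed by this
/// crate):
///
/// ```no_run
/// use ra_flycheck::{CheckConfig, CheckTask, CheckWatcher};
///
/// let config = CheckConfig {
///     enable: true,
///     args: Vec::new(),
///     command: "check".to_string(),
///     all_targets: true,
/// };
/// let watcher = CheckWatcher::new(config, "/path/to/project".into());
///
/// // Ask for a (re-)run of the configured command, e.g. after the user saves a file.
/// watcher.update();
///
/// // Drain the tasks produced by the background thread.
/// while let Ok(task) = watcher.task_recv.recv() {
///     match task {
///         CheckTask::ClearDiagnostics => { /* drop previously published diagnostics */ }
///         CheckTask::AddDiagnostic { .. } => { /* publish the diagnostic and its fixes */ }
///         CheckTask::Status(_) => { /* forward as an LSP progress notification */ }
///     }
/// }
/// ```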
#[derive(Debug)]
pub struct CheckWatcher {
    // XXX: drop order is significant
    cmd_send: Sender<CheckCommand>,
    handle: Option<jod_thread::JoinHandle<()>>,
    pub task_recv: Receiver<CheckTask>,
}

impl CheckWatcher {
    pub fn new(config: CheckConfig, workspace_root: PathBuf) -> CheckWatcher {
        let (task_send, task_recv) = unbounded::<CheckTask>();
        let (cmd_send, cmd_recv) = unbounded::<CheckCommand>();
        let handle = jod_thread::spawn(move || {
            let mut check = CheckWatcherThread::new(config, workspace_root);
            check.run(&task_send, &cmd_recv);
        });
        CheckWatcher { task_recv, cmd_send, handle: Some(handle) }
    }

    /// Schedule a re-start of the cargo check worker.
    pub fn update(&self) {
        self.cmd_send.send(CheckCommand::Update).unwrap();
    }
}

#[derive(Debug)]
pub enum CheckTask {
    /// Request a clearing of all cached diagnostics from the check watcher
    ClearDiagnostics,

    /// Request adding a diagnostic with fixes included to a file
    AddDiagnostic { url: Url, diagnostic: Diagnostic, fixes: Vec<CodeActionOrCommand> },

    /// Request check progress notification to client
    Status(WorkDoneProgress),
}

pub enum CheckCommand {
    /// Request re-start of check thread
    Update,
}

struct CheckWatcherThread {
    options: CheckConfig,
    workspace_root: PathBuf,
    watcher: WatchThread,
    last_update_req: Option<Instant>,
}

impl CheckWatcherThread {
    fn new(options: CheckConfig, workspace_root: PathBuf) -> CheckWatcherThread {
        CheckWatcherThread {
            options,
            workspace_root,
            watcher: WatchThread::dummy(),
            last_update_req: None,
        }
    }

    fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) {
        // If we rerun the thread, we need to discard the previous check results first
        self.clean_previous_results(task_send);

        loop {
            select! {
                recv(&cmd_recv) -> cmd => match cmd {
                    Ok(cmd) => self.handle_command(cmd),
                    Err(RecvError) => {
                        // Command channel has closed, so shut down
                        break;
                    },
                },
                recv(self.watcher.message_recv) -> msg => match msg {
                    Ok(msg) => self.handle_message(msg, task_send),
                    Err(RecvError) => {
                        // Watcher finished, replace it with a never channel to
                        // avoid busy-waiting.
                        std::mem::replace(&mut self.watcher.message_recv, never());
                    },
                }
            };

            if self.should_recheck() {
                self.last_update_req.take();
                task_send.send(CheckTask::ClearDiagnostics).unwrap();

                // Replace with a dummy watcher first so we drop the original and wait for completion
                std::mem::replace(&mut self.watcher, WatchThread::dummy());

                // Then create the actual new watcher
                self.watcher = WatchThread::new(&self.options, &self.workspace_root);
            }
        }
    }

    fn clean_previous_results(&self, task_send: &Sender<CheckTask>) {
        task_send.send(CheckTask::ClearDiagnostics).unwrap();
        task_send
            .send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd { message: None })))
            .unwrap();
    }

    fn should_recheck(&mut self) -> bool {
        if let Some(_last_update_req) = &self.last_update_req {
            // We currently only request an update on save, as we need up-to-date
            // source on disk for cargo check to do its magic, so we don't really
            // need to debounce the requests at this point.
            return true;
        }
        false
    }

    fn handle_command(&mut self, cmd: CheckCommand) {
        match cmd {
            CheckCommand::Update => self.last_update_req = Some(Instant::now()),
        }
    }

    fn handle_message(&self, msg: CheckEvent, task_send: &Sender<CheckTask>) {
        match msg {
            CheckEvent::Begin => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::Begin(WorkDoneProgressBegin {
                        title: "Running 'cargo check'".to_string(),
                        cancellable: Some(false),
                        message: None,
                        percentage: None,
                    })))
                    .unwrap();
            }

            CheckEvent::End => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd {
                        message: None,
                    })))
                    .unwrap();
            }

            CheckEvent::Msg(Message::CompilerArtifact(msg)) => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::Report(WorkDoneProgressReport {
                        cancellable: Some(false),
                        message: Some(msg.target.name),
                        percentage: None,
                    })))
                    .unwrap();
            }

            CheckEvent::Msg(Message::CompilerMessage(msg)) => {
                let map_result = map_rust_diagnostic_to_lsp(&msg.message, &self.workspace_root);
                if map_result.is_empty() {
                    return;
                }

                for MappedRustDiagnostic { location, diagnostic, fixes } in map_result {
                    let fixes = fixes
                        .into_iter()
                        .map(|fix| {
                            CodeAction { diagnostics: Some(vec![diagnostic.clone()]), ..fix }.into()
                        })
                        .collect();

                    task_send
                        .send(CheckTask::AddDiagnostic { url: location.uri, diagnostic, fixes })
                        .unwrap();
                }
            }

            CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {}
            CheckEvent::Msg(Message::Unknown) => {}
        }
    }
}

#[derive(Debug)]
pub struct DiagnosticWithFixes {
    diagnostic: Diagnostic,
    fixes: Vec<CodeAction>,
}

/// WatchThread exists to wrap around the communication needed to be able to
/// run `cargo check` without blocking. Currently the Rust standard library
/// doesn't provide a way to read sub-process output without blocking, so we
/// have to wrap the sub-process's output handling in a thread and pass
/// messages back over a channel.
/// The correct way to dispose of the thread is to drop it, upon which the
/// sub-process will be killed, and the thread will be joined.
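///
/// A rough lifecycle sketch (crate-internal code; `options` and `root` are assumed to
/// come from the surrounding `CheckWatcherThread`):
///
/// ```ignore
/// let watcher = WatchThread::new(&options, &root);
/// while let Ok(event) = watcher.message_recv.recv() {
///     // handle CheckEvent::Begin, CheckEvent::Msg(..) and CheckEvent::End
/// }
/// // Dropping the watcher kills the spawned `cargo` process and joins the thread.
/// drop(watcher);
/// ```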
struct WatchThread {
    // XXX: drop order is significant
    message_recv: Receiver<CheckEvent>,
    _handle: Option<jod_thread::JoinHandle<()>>,
}

enum CheckEvent {
    Begin,
    Msg(cargo_metadata::Message),
    End,
}

#[derive(Debug)]
pub struct CargoError(String);

impl fmt::Display for CargoError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Cargo failed: {}", self.0)
    }
}
impl error::Error for CargoError {}

fn run_cargo(
    args: &[String],
    current_dir: Option<&Path>,
    on_message: &mut dyn FnMut(cargo_metadata::Message) -> bool,
) -> Result<(), CargoError> {
    let mut command = Command::new("cargo");
    if let Some(current_dir) = current_dir {
        command.current_dir(current_dir);
    }

    let mut child = command
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .stdin(Stdio::null())
        .spawn()
        .expect("couldn't launch cargo");

    // We manually read a line at a time instead of using serde's stream
    // deserializers: the deserializer cannot recover from an error and would
    // get stuck, and we want to be resilient against failures.
    //
    // Because cargo only outputs one JSON object per line, we can simply skip
    // a line if it doesn't parse, which just ignores any erroneous output.
    let stdout = BufReader::new(child.stdout.take().unwrap());
    let mut read_at_least_one_message = false;

    for line in stdout.lines() {
        let line = match line {
            Ok(line) => line,
            Err(err) => {
                log::error!("Couldn't read line from cargo: {}", err);
                continue;
            }
        };

        let message = serde_json::from_str::<cargo_metadata::Message>(&line);
        let message = match message {
            Ok(message) => message,
            Err(err) => {
                log::error!("Invalid json from cargo check, ignoring ({}): {:?} ", err, line);
                continue;
            }
        };

        read_at_least_one_message = true;

        if !on_message(message) {
            break;
        }
    }

    // It is okay to ignore the result, as it only errors if the process is already dead
    let _ = child.kill();

    let err_msg = match child.wait() {
        Ok(exit_code) if !exit_code.success() && !read_at_least_one_message => {
            // FIXME: Read the stderr to display the reason, see `read2()` reference in PR comment:
            // https://github.com/rust-analyzer/rust-analyzer/pull/3632#discussion_r395605298
            format!(
                "the command produced no valid metadata (exit code: {:?}): cargo {}",
                exit_code,
                args.join(" ")
            )
        }
        Err(err) => format!("io error: {:?}", err),
        Ok(_) => return Ok(()),
    };

    Err(CargoError(err_msg))
}

impl WatchThread {
    fn dummy() -> WatchThread {
        WatchThread { message_recv: never(), _handle: None }
    }

    fn new(options: &CheckConfig, workspace_root: &Path) -> WatchThread {
        let mut args: Vec<String> = vec![
            options.command.clone(),
            "--workspace".to_string(),
            "--message-format=json".to_string(),
            "--manifest-path".to_string(),
            format!("{}/Cargo.toml", workspace_root.display()),
        ];
        if options.all_targets {
            args.push("--all-targets".to_string());
        }
        args.extend(options.args.iter().cloned());
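        // Taken together, `args` amounts to an invocation along the following lines
        // (a sketch, assuming `command == "check"` and `all_targets == true`;
        // `run_cargo` below prepends the `cargo` binary itself):
        //
        //     cargo check --workspace --message-format=json \
        //         --manifest-path <workspace_root>/Cargo.toml --all-targets <extra args>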

        let (message_send, message_recv) = unbounded();
        let workspace_root = workspace_root.to_owned();
        let handle = if options.enable {
            Some(jod_thread::spawn(move || {
                // If sending fails here, it will fail in the loop below as well,
                // which breaks out of the loop and continues the shutdown
                let _ = message_send.send(CheckEvent::Begin);

                let res = run_cargo(&args, Some(&workspace_root), &mut |message| {
                    // Skip certain kinds of messages to only spend time on what's useful
                    match &message {
                        Message::CompilerArtifact(artifact) if artifact.fresh => return true,
                        Message::BuildScriptExecuted(_) => return true,
                        Message::Unknown => return true,
                        _ => {}
                    }

                    // If the send channel was closed, we want to shut down
                    message_send.send(CheckEvent::Msg(message)).is_ok()
                });

                if let Err(err) = res {
                    // FIXME: make `message_send` a `Sender<Result<CheckEvent, CargoError>>`
                    // to display user-caused misconfiguration errors instead of just logging them here
                    log::error!("Cargo watcher failed {:?}", err);
                }

                // We can ignore any error here, as we are already in the process
                // of shutting down.
                let _ = message_send.send(CheckEvent::End);
            }))
        } else {
            None
        };
        WatchThread { message_recv, _handle: handle }
    }
}