Diffstat (limited to 'crates/ra_cargo_watch/src/lib.rs')
-rw-r--r--  crates/ra_cargo_watch/src/lib.rs  392
1 file changed, 392 insertions(+), 0 deletions(-)
diff --git a/crates/ra_cargo_watch/src/lib.rs b/crates/ra_cargo_watch/src/lib.rs
new file mode 100644
index 000000000..e5c22e599
--- /dev/null
+++ b/crates/ra_cargo_watch/src/lib.rs
@@ -0,0 +1,392 @@
//! cargo_check provides the functionality needed to run `cargo check` or
//! another compatible command (e.g. clippy) in a background thread and
//! provide LSP diagnostics based on the output of the command.
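//!
//! A minimal usage sketch (the option values and workspace path below are
//! placeholders, not defaults):
//!
//! ```no_run
//! use ra_cargo_watch::{CheckOptions, CheckTask, CheckWatcher};
//! use std::path::PathBuf;
//!
//! let options = CheckOptions {
//!     enable: true,
//!     args: Vec::new(),
//!     command: "check".to_string(),
//!     all_targets: false,
//! };
//! let watcher = CheckWatcher::new(&options, PathBuf::from("/path/to/workspace"));
//!
//! // Ask the worker to (re-)run the command, then drain the resulting tasks.
//! watcher.update();
//! while let Ok(task) = watcher.task_recv.recv() {
//!     match task {
//!         CheckTask::Update(_uri) => { /* republish diagnostics for this file */ }
//!         CheckTask::Status(_progress) => { /* forward progress to the client */ }
//!     }
//! }
//! ```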
use cargo_metadata::Message;
use crossbeam_channel::{never, select, unbounded, Receiver, RecvError, Sender};
use lsp_types::{
    Diagnostic, Url, WorkDoneProgress, WorkDoneProgressBegin, WorkDoneProgressEnd,
    WorkDoneProgressReport,
};
use parking_lot::RwLock;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    sync::Arc,
    thread::JoinHandle,
    time::Instant,
};

mod conv;

use crate::conv::{map_rust_diagnostic_to_lsp, MappedRustDiagnostic, SuggestedFix};

/// Options for the check worker. `command` is the cargo sub-command to run
/// (e.g. `check` or `clippy`); `args` are extra arguments appended to it.
#[derive(Clone, Debug)]
pub struct CheckOptions {
    pub enable: bool,
    pub args: Vec<String>,
    pub command: String,
    pub all_targets: bool,
}

/// CheckWatcher wraps the shared state and communication machinery used for
/// running `cargo check` (or another compatible command) and providing
/// diagnostics based on the output.
/// The spawned thread is shut down when this struct is dropped.
#[derive(Debug)]
pub struct CheckWatcher {
    pub task_recv: Receiver<CheckTask>,
    pub shared: Arc<RwLock<CheckWatcherSharedState>>,
    cmd_send: Option<Sender<CheckCommand>>,
    handle: Option<JoinHandle<()>>,
}

impl CheckWatcher {
    pub fn new(options: &CheckOptions, workspace_root: PathBuf) -> CheckWatcher {
        let options = options.clone();
        let shared = Arc::new(RwLock::new(CheckWatcherSharedState::new()));

        let (task_send, task_recv) = unbounded::<CheckTask>();
        let (cmd_send, cmd_recv) = unbounded::<CheckCommand>();
        let shared_ = shared.clone();
        let handle = std::thread::spawn(move || {
            let mut check = CheckWatcherState::new(options, workspace_root, shared_);
            check.run(&task_send, &cmd_recv);
        });
        CheckWatcher { task_recv, cmd_send: Some(cmd_send), handle: Some(handle), shared }
    }

    /// Schedule a restart of the cargo check worker.
    pub fn update(&self) {
        if let Some(cmd_send) = &self.cmd_send {
            cmd_send.send(CheckCommand::Update).unwrap();
        }
    }
}

impl std::ops::Drop for CheckWatcher {
    fn drop(&mut self) {
        if let Some(handle) = self.handle.take() {
            // Take the sender out of the Option; dropping it closes the
            // channel, which finishes the thread's loop
            let cmd_send = self.cmd_send.take();
            drop(cmd_send);

            // Join the thread; it should finish shortly. We don't really care
            // whether it panicked, so it is safe to ignore the result
            let _ = handle.join();
        }
    }
}
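
/// Shared state between the check worker and the LSP server: the latest
/// diagnostics and suggested fixes per file, keyed by URI.
///
/// A minimal consumer sketch (assumes a `watcher: CheckWatcher` and a
/// `uri: Url` in scope):
///
/// ```ignore
/// let state = watcher.shared.read();
/// if let Some(diagnostics) = state.diagnostics_for(&uri) {
///     // publish these diagnostics for `uri` to the client
/// }
/// ```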
#[derive(Debug)]
pub struct CheckWatcherSharedState {
    diagnostic_collection: HashMap<Url, Vec<Diagnostic>>,
    suggested_fix_collection: HashMap<Url, Vec<SuggestedFix>>,
}

impl CheckWatcherSharedState {
    fn new() -> CheckWatcherSharedState {
        CheckWatcherSharedState {
            diagnostic_collection: HashMap::new(),
            suggested_fix_collection: HashMap::new(),
        }
    }

    /// Clear the cached diagnostics, and schedule an update for every
    /// previously affected file so the server publishes the now empty
    /// results instead of stale ones.
    pub fn clear(&mut self, task_send: &Sender<CheckTask>) {
        let cleared_files: Vec<Url> = self.diagnostic_collection.keys().cloned().collect();

        self.diagnostic_collection.clear();
        self.suggested_fix_collection.clear();

        for uri in cleared_files {
            task_send.send(CheckTask::Update(uri)).unwrap();
        }
    }

    pub fn diagnostics_for(&self, uri: &Url) -> Option<&[Diagnostic]> {
        self.diagnostic_collection.get(uri).map(|d| d.as_slice())
    }

    pub fn fixes_for(&self, uri: &Url) -> Option<&[SuggestedFix]> {
        self.suggested_fix_collection.get(uri).map(|d| d.as_slice())
    }

    fn add_diagnostic(&mut self, file_uri: Url, diagnostic: Diagnostic) {
        let diagnostics = self.diagnostic_collection.entry(file_uri).or_default();

        // If we're building multiple targets, it's possible we've already
        // seen this diagnostic
        let is_duplicate = diagnostics.iter().any(|d| are_diagnostics_equal(d, &diagnostic));
        if is_duplicate {
            return;
        }

        diagnostics.push(diagnostic);
    }

    fn add_suggested_fix_for_diagnostic(
        &mut self,
        mut suggested_fix: SuggestedFix,
        diagnostic: &Diagnostic,
    ) {
        let file_uri = suggested_fix.location.uri.clone();
        let file_suggestions = self.suggested_fix_collection.entry(file_uri).or_default();

        let existing_suggestion: Option<&mut SuggestedFix> =
            file_suggestions.iter_mut().find(|s| s == &&suggested_fix);
        if let Some(existing_suggestion) = existing_suggestion {
            // The existing suggestion also applies to this new diagnostic
            existing_suggestion.diagnostics.push(diagnostic.clone());
        } else {
            // We haven't seen this suggestion before
            suggested_fix.diagnostics.push(diagnostic.clone());
            file_suggestions.push(suggested_fix);
        }
    }
}

#[derive(Debug)]
pub enum CheckTask {
    /// Request an update of the given file's diagnostics
    Update(Url),

    /// Request a check progress notification to be sent to the client
    Status(WorkDoneProgress),
}

pub enum CheckCommand {
    /// Request a restart of the check thread
    Update,
}

struct CheckWatcherState {
    options: CheckOptions,
    workspace_root: PathBuf,
    watcher: WatchThread,
    last_update_req: Option<Instant>,
    shared: Arc<RwLock<CheckWatcherSharedState>>,
}

impl CheckWatcherState {
    fn new(
        options: CheckOptions,
        workspace_root: PathBuf,
        shared: Arc<RwLock<CheckWatcherSharedState>>,
    ) -> CheckWatcherState {
        let watcher = WatchThread::new(&options, &workspace_root);
        CheckWatcherState { options, workspace_root, watcher, last_update_req: None, shared }
    }

    fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) {
        loop {
            select! {
                recv(&cmd_recv) -> cmd => match cmd {
                    Ok(cmd) => self.handle_command(cmd),
                    Err(RecvError) => {
                        // Command channel has closed, so shut down
                        break;
                    },
                },
                recv(self.watcher.message_recv) -> msg => match msg {
                    Ok(msg) => self.handle_message(msg, task_send),
                    Err(RecvError) => {
                        // Watcher finished; replace its channel with a never
                        // channel to avoid busy-waiting.
                        self.watcher.message_recv = never();
                    },
                }
            };

            if self.should_recheck() {
                self.last_update_req.take();
                self.shared.write().clear(task_send);

                // By replacing the watcher, we drop the previous one, which
                // causes it to shut down automatically.
                self.watcher = WatchThread::new(&self.options, &self.workspace_root);
            }
        }
    }

    fn should_recheck(&mut self) -> bool {
        if let Some(_last_update_req) = &self.last_update_req {
            // We currently only request an update on save, and cargo check
            // needs up-to-date sources on disk to do its magic, so we don't
            // really need to debounce the requests at this point.
            return true;
        }
        false
    }

    fn handle_command(&mut self, cmd: CheckCommand) {
        match cmd {
            CheckCommand::Update => self.last_update_req = Some(Instant::now()),
        }
    }

    fn handle_message(&mut self, msg: CheckEvent, task_send: &Sender<CheckTask>) {
        match msg {
            CheckEvent::Begin => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::Begin(WorkDoneProgressBegin {
                        title: "Running 'cargo check'".to_string(),
                        cancellable: Some(false),
                        message: None,
                        percentage: None,
                    })))
                    .unwrap();
            }

            CheckEvent::End => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd {
                        message: None,
                    })))
                    .unwrap();
            }

            CheckEvent::Msg(Message::CompilerArtifact(msg)) => {
                task_send
                    .send(CheckTask::Status(WorkDoneProgress::Report(WorkDoneProgressReport {
                        cancellable: Some(false),
                        message: Some(msg.target.name),
                        percentage: None,
                    })))
                    .unwrap();
            }

            CheckEvent::Msg(Message::CompilerMessage(msg)) => {
                let map_result =
                    match map_rust_diagnostic_to_lsp(&msg.message, &self.workspace_root) {
                        Some(map_result) => map_result,
                        None => return,
                    };

                let MappedRustDiagnostic { location, diagnostic, suggested_fixes } = map_result;
                let file_uri = location.uri.clone();

                for suggested_fix in suggested_fixes {
                    self.shared
                        .write()
                        .add_suggested_fix_for_diagnostic(suggested_fix, &diagnostic);
                }
                self.shared.write().add_diagnostic(file_uri, diagnostic);

                task_send.send(CheckTask::Update(location.uri)).unwrap();
            }

            CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {}
            CheckEvent::Msg(Message::Unknown) => {}
        }
    }
}

/// WatchThread wraps the communication needed to run `cargo check` without
/// blocking. The Rust standard library currently doesn't provide a way to
/// read sub-process output without blocking, so we handle the sub-process
/// output in a dedicated thread and pass messages back over a channel.
/// The correct way to dispose of the thread is to drop it: the sub-process
/// is killed and the thread is joined.
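///
/// The pattern in miniature (a sketch only, not the exact code below;
/// `tx`/`rx` are illustrative names):
///
/// ```ignore
/// let (tx, rx) = crossbeam_channel::unbounded();
/// std::thread::spawn(move || {
///     let mut child = std::process::Command::new("cargo")
///         .args(&["check", "--message-format=json"])
///         .stdout(std::process::Stdio::piped())
///         .spawn()
///         .unwrap();
///     for msg in cargo_metadata::parse_messages(child.stdout.take().unwrap()) {
///         if tx.send(msg).is_err() {
///             break; // receiver dropped, so stop reading and shut down
///         }
///     }
/// });
/// // `rx.recv()` can now be polled (e.g. inside `select!`) without blocking
/// // on the sub-process itself.
/// ```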
struct WatchThread {
    handle: Option<JoinHandle<()>>,
    message_recv: Receiver<CheckEvent>,
}

enum CheckEvent {
    Begin,
    Msg(cargo_metadata::Message),
    End,
}

impl WatchThread {
    fn new(options: &CheckOptions, workspace_root: &Path) -> WatchThread {
        let mut args: Vec<String> = vec![
            options.command.clone(),
            "--message-format=json".to_string(),
            "--manifest-path".to_string(),
            format!("{}/Cargo.toml", workspace_root.to_string_lossy()),
        ];
        if options.all_targets {
            args.push("--all-targets".to_string());
        }
        args.extend(options.args.iter().cloned());

        let (message_send, message_recv) = unbounded();
        let enabled = options.enable;
        let handle = std::thread::spawn(move || {
            if !enabled {
                return;
            }

            let mut command = Command::new("cargo")
                .args(&args)
                .stdout(Stdio::piped())
                .stderr(Stdio::null())
                .spawn()
                .expect("couldn't launch cargo");

            // We can ignore a send error here: if the receiver is gone, the
            // sends in the loop below will fail as well, and we break out to
            // continue the shutdown
            let _ = message_send.send(CheckEvent::Begin);

            for message in cargo_metadata::parse_messages(command.stdout.take().unwrap()) {
                let message = match message {
                    Ok(message) => message,
                    Err(err) => {
                        log::error!("Invalid json from cargo check, ignoring: {}", err);
                        continue;
                    }
                };

                match message_send.send(CheckEvent::Msg(message)) {
                    Ok(()) => {}
                    Err(_err) => {
                        // The send channel was closed, so we want to shut down
                        break;
                    }
                }
            }

            // We can ignore any error here, as we are already in the process
            // of shutting down.
            let _ = message_send.send(CheckEvent::End);

            // It is okay to ignore the result, as it only errors if the
            // process is already dead
            let _ = command.kill();

            // Again, we don't care about the exit status, so just ignore the
            // result
            let _ = command.wait();
        });
        WatchThread { handle: Some(handle), message_recv }
    }
}

impl std::ops::Drop for WatchThread {
    fn drop(&mut self) {
        if let Some(handle) = self.handle.take() {
            // Replace our receiver with a dummy one, so we can drop and close
            // the one actually communicating with the thread
            let recv = std::mem::replace(&mut self.message_recv, never());

            // Dropping the original receiver initiates the sub-process
            // shutdown
            drop(recv);

            // Join the thread; it should finish shortly. We don't really care
            // whether it panicked, so it is safe to ignore the result
            let _ = handle.join();
        }
    }
}

fn are_diagnostics_equal(left: &Diagnostic, right: &Diagnostic) -> bool {
    left.source == right.source
        && left.severity == right.severity
        && left.range == right.range
        && left.message == right.message
}