diff options
Diffstat (limited to 'crates/ra_cargo_watch/src/lib.rs')
-rw-r--r-- | crates/ra_cargo_watch/src/lib.rs | 345 |
1 file changed, 345 insertions, 0 deletions
diff --git a/crates/ra_cargo_watch/src/lib.rs b/crates/ra_cargo_watch/src/lib.rs new file mode 100644 index 000000000..c86386610 --- /dev/null +++ b/crates/ra_cargo_watch/src/lib.rs | |||
@@ -0,0 +1,345 @@ | |||
1 | //! cargo_check provides the functionality needed to run `cargo check` or | ||
2 | //! another compatible command (e.g. clippy) in a background thread and provide | ||
3 | //! LSP diagnostics based on the output of the command. | ||
use cargo_metadata::Message;
use crossbeam_channel::{select, unbounded, Receiver, RecvError, Sender, TryRecvError};
use lsp_types::{
    Diagnostic, Url, WorkDoneProgress, WorkDoneProgressBegin, WorkDoneProgressEnd,
    WorkDoneProgressReport,
};
use parking_lot::RwLock;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    process::{Command, Stdio},
    sync::Arc,
    thread::JoinHandle,
    time::Instant,
};
19 | |||
20 | mod conv; | ||
21 | |||
22 | use crate::conv::{map_rust_diagnostic_to_lsp, MappedRustDiagnostic, SuggestedFix}; | ||
23 | |||
/// Configuration for running `cargo check` (or a compatible command).
#[derive(Clone, Debug)]
pub struct CheckOptions {
    /// When false, the watcher thread exits immediately without spawning cargo.
    pub enable: bool,
    /// Extra arguments appended to the cargo command line after the built-in ones.
    pub args: Vec<String>,
    /// The cargo sub-command to run, e.g. "check" or "clippy".
    pub command: String,
    /// When true, `--all-targets` is passed to cargo.
    pub all_targets: bool,
}
31 | |||
/// CheckWatcher wraps the shared state and communication machinery used for
/// running `cargo check` (or other compatible command) and providing
/// diagnostics based on the output.
#[derive(Debug)]
pub struct CheckWatcher {
    /// Receives `CheckTask`s (diagnostic/progress updates) from the worker thread.
    pub task_recv: Receiver<CheckTask>,
    /// Sends `CheckCommand`s (restart requests) to the worker thread.
    pub cmd_send: Sender<CheckCommand>,
    /// Diagnostics and suggested fixes, shared with the worker thread.
    pub shared: Arc<RwLock<CheckWatcherSharedState>>,
    // Keeps ownership of the worker thread; not joined anywhere in this file.
    handle: JoinHandle<()>,
}
42 | |||
43 | impl CheckWatcher { | ||
44 | pub fn new(options: &CheckOptions, workspace_root: PathBuf) -> CheckWatcher { | ||
45 | let options = options.clone(); | ||
46 | let shared = Arc::new(RwLock::new(CheckWatcherSharedState::new())); | ||
47 | |||
48 | let (task_send, task_recv) = unbounded::<CheckTask>(); | ||
49 | let (cmd_send, cmd_recv) = unbounded::<CheckCommand>(); | ||
50 | let shared_ = shared.clone(); | ||
51 | let handle = std::thread::spawn(move || { | ||
52 | let mut check = CheckWatcherState::new(options, workspace_root, shared_); | ||
53 | check.run(&task_send, &cmd_recv); | ||
54 | }); | ||
55 | |||
56 | CheckWatcher { task_recv, cmd_send, handle, shared } | ||
57 | } | ||
58 | |||
59 | /// Schedule a re-start of the cargo check worker. | ||
60 | pub fn update(&self) { | ||
61 | self.cmd_send.send(CheckCommand::Update).unwrap(); | ||
62 | } | ||
63 | } | ||
64 | |||
/// The worker-thread side of the watcher: owns the currently running
/// `cargo check` sub-process (via `WatchThread`) and the loop state.
pub struct CheckWatcherState {
    // Command configuration, used every time the check process is (re)started.
    options: CheckOptions,
    // Root of the workspace; its Cargo.toml is passed via --manifest-path.
    workspace_root: PathBuf,
    // Set to false when the command channel closes, terminating the run loop.
    running: bool,
    // Handle to the currently running check sub-process.
    watcher: WatchThread,
    // Timestamp of the most recent update request; Some(_) means a re-check is pending.
    last_update_req: Option<Instant>,
    // Diagnostics/fixes shared with the server side.
    shared: Arc<RwLock<CheckWatcherSharedState>>,
}
73 | |||
/// Diagnostics and suggested fixes produced by the check run, keyed by file
/// URI. Written by the worker thread, read by the server.
#[derive(Debug)]
pub struct CheckWatcherSharedState {
    // All diagnostics seen so far for each file.
    diagnostic_collection: HashMap<Url, Vec<Diagnostic>>,
    // All suggested fixes seen so far for each file.
    suggested_fix_collection: HashMap<Url, Vec<SuggestedFix>>,
}
79 | |||
80 | impl CheckWatcherSharedState { | ||
81 | fn new() -> CheckWatcherSharedState { | ||
82 | CheckWatcherSharedState { | ||
83 | diagnostic_collection: HashMap::new(), | ||
84 | suggested_fix_collection: HashMap::new(), | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /// Clear the cached diagnostics, and schedule updating diagnostics by the | ||
89 | /// server, to clear stale results. | ||
90 | pub fn clear(&mut self, task_send: &Sender<CheckTask>) { | ||
91 | let cleared_files: Vec<Url> = self.diagnostic_collection.keys().cloned().collect(); | ||
92 | |||
93 | self.diagnostic_collection.clear(); | ||
94 | self.suggested_fix_collection.clear(); | ||
95 | |||
96 | for uri in cleared_files { | ||
97 | task_send.send(CheckTask::Update(uri.clone())).unwrap(); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | pub fn diagnostics_for(&self, uri: &Url) -> Option<&[Diagnostic]> { | ||
102 | self.diagnostic_collection.get(uri).map(|d| d.as_slice()) | ||
103 | } | ||
104 | |||
105 | pub fn fixes_for(&self, uri: &Url) -> Option<&[SuggestedFix]> { | ||
106 | self.suggested_fix_collection.get(uri).map(|d| d.as_slice()) | ||
107 | } | ||
108 | |||
109 | fn add_diagnostic(&mut self, file_uri: Url, diagnostic: Diagnostic) { | ||
110 | let diagnostics = self.diagnostic_collection.entry(file_uri).or_default(); | ||
111 | |||
112 | // If we're building multiple targets it's possible we've already seen this diagnostic | ||
113 | let is_duplicate = diagnostics.iter().any(|d| are_diagnostics_equal(d, &diagnostic)); | ||
114 | if is_duplicate { | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | diagnostics.push(diagnostic); | ||
119 | } | ||
120 | |||
121 | fn add_suggested_fix_for_diagnostic( | ||
122 | &mut self, | ||
123 | mut suggested_fix: SuggestedFix, | ||
124 | diagnostic: &Diagnostic, | ||
125 | ) { | ||
126 | let file_uri = suggested_fix.location.uri.clone(); | ||
127 | let file_suggestions = self.suggested_fix_collection.entry(file_uri).or_default(); | ||
128 | |||
129 | let existing_suggestion: Option<&mut SuggestedFix> = | ||
130 | file_suggestions.iter_mut().find(|s| s == &&suggested_fix); | ||
131 | if let Some(existing_suggestion) = existing_suggestion { | ||
132 | // The existing suggestion also applies to this new diagnostic | ||
133 | existing_suggestion.diagnostics.push(diagnostic.clone()); | ||
134 | } else { | ||
135 | // We haven't seen this suggestion before | ||
136 | suggested_fix.diagnostics.push(diagnostic.clone()); | ||
137 | file_suggestions.push(suggested_fix); | ||
138 | } | ||
139 | } | ||
140 | } | ||
141 | |||
/// Tasks sent from the worker thread to the server.
#[derive(Debug)]
pub enum CheckTask {
    /// Request an update of the given file's diagnostics
    Update(Url),

    /// Request check progress notification to client
    Status(WorkDoneProgress),
}
150 | |||
/// Commands sent from the server to the worker thread.
pub enum CheckCommand {
    /// Request re-start of check thread
    Update,
}
155 | |||
156 | impl CheckWatcherState { | ||
157 | pub fn new( | ||
158 | options: CheckOptions, | ||
159 | workspace_root: PathBuf, | ||
160 | shared: Arc<RwLock<CheckWatcherSharedState>>, | ||
161 | ) -> CheckWatcherState { | ||
162 | let watcher = WatchThread::new(&options, &workspace_root); | ||
163 | CheckWatcherState { | ||
164 | options, | ||
165 | workspace_root, | ||
166 | running: false, | ||
167 | watcher, | ||
168 | last_update_req: None, | ||
169 | shared, | ||
170 | } | ||
171 | } | ||
172 | |||
173 | pub fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) { | ||
174 | self.running = true; | ||
175 | while self.running { | ||
176 | select! { | ||
177 | recv(&cmd_recv) -> cmd => match cmd { | ||
178 | Ok(cmd) => self.handle_command(cmd), | ||
179 | Err(RecvError) => { | ||
180 | // Command channel has closed, so shut down | ||
181 | self.running = false; | ||
182 | }, | ||
183 | }, | ||
184 | recv(self.watcher.message_recv) -> msg => match msg { | ||
185 | Ok(msg) => self.handle_message(msg, task_send), | ||
186 | Err(RecvError) => {}, | ||
187 | } | ||
188 | }; | ||
189 | |||
190 | if self.should_recheck() { | ||
191 | self.last_update_req.take(); | ||
192 | self.shared.write().clear(task_send); | ||
193 | |||
194 | self.watcher.cancel(); | ||
195 | self.watcher = WatchThread::new(&self.options, &self.workspace_root); | ||
196 | } | ||
197 | } | ||
198 | } | ||
199 | |||
200 | fn should_recheck(&mut self) -> bool { | ||
201 | if let Some(_last_update_req) = &self.last_update_req { | ||
202 | // We currently only request an update on save, as we need up to | ||
203 | // date source on disk for cargo check to do it's magic, so we | ||
204 | // don't really need to debounce the requests at this point. | ||
205 | return true; | ||
206 | } | ||
207 | false | ||
208 | } | ||
209 | |||
210 | fn handle_command(&mut self, cmd: CheckCommand) { | ||
211 | match cmd { | ||
212 | CheckCommand::Update => self.last_update_req = Some(Instant::now()), | ||
213 | } | ||
214 | } | ||
215 | |||
216 | fn handle_message(&mut self, msg: CheckEvent, task_send: &Sender<CheckTask>) { | ||
217 | match msg { | ||
218 | CheckEvent::Begin => { | ||
219 | task_send | ||
220 | .send(CheckTask::Status(WorkDoneProgress::Begin(WorkDoneProgressBegin { | ||
221 | title: "Running 'cargo check'".to_string(), | ||
222 | cancellable: Some(false), | ||
223 | message: None, | ||
224 | percentage: None, | ||
225 | }))) | ||
226 | .unwrap(); | ||
227 | } | ||
228 | |||
229 | CheckEvent::End => { | ||
230 | task_send | ||
231 | .send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd { | ||
232 | message: None, | ||
233 | }))) | ||
234 | .unwrap(); | ||
235 | } | ||
236 | |||
237 | CheckEvent::Msg(Message::CompilerArtifact(msg)) => { | ||
238 | task_send | ||
239 | .send(CheckTask::Status(WorkDoneProgress::Report(WorkDoneProgressReport { | ||
240 | cancellable: Some(false), | ||
241 | message: Some(msg.target.name), | ||
242 | percentage: None, | ||
243 | }))) | ||
244 | .unwrap(); | ||
245 | } | ||
246 | |||
247 | CheckEvent::Msg(Message::CompilerMessage(msg)) => { | ||
248 | let map_result = | ||
249 | match map_rust_diagnostic_to_lsp(&msg.message, &self.workspace_root) { | ||
250 | Some(map_result) => map_result, | ||
251 | None => return, | ||
252 | }; | ||
253 | |||
254 | let MappedRustDiagnostic { location, diagnostic, suggested_fixes } = map_result; | ||
255 | let file_uri = location.uri.clone(); | ||
256 | |||
257 | if !suggested_fixes.is_empty() { | ||
258 | for suggested_fix in suggested_fixes { | ||
259 | self.shared | ||
260 | .write() | ||
261 | .add_suggested_fix_for_diagnostic(suggested_fix, &diagnostic); | ||
262 | } | ||
263 | } | ||
264 | self.shared.write().add_diagnostic(file_uri, diagnostic); | ||
265 | |||
266 | task_send.send(CheckTask::Update(location.uri)).unwrap(); | ||
267 | } | ||
268 | |||
269 | CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {} | ||
270 | CheckEvent::Msg(Message::Unknown) => {} | ||
271 | } | ||
272 | } | ||
273 | } | ||
274 | |||
/// WatchThread exists to wrap around the communication needed to be able to
/// run `cargo check` without blocking. Currently the Rust standard library
/// doesn't provide a way to read sub-process output without blocking, so we
/// have to wrap sub-processes output handling in a thread and pass messages
/// back over a channel.
struct WatchThread {
    // Events produced by the sub-process reader thread.
    message_recv: Receiver<CheckEvent>,
    // Sending () (or dropping this sender) asks the reader thread to kill
    // the sub-process and stop.
    cancel_send: Sender<()>,
}
284 | |||
/// Events emitted by the sub-process reader thread.
enum CheckEvent {
    /// The check process was spawned successfully.
    Begin,
    /// One parsed `cargo_metadata` message from the process' stdout.
    Msg(cargo_metadata::Message),
    /// The process' stdout was exhausted; the run is over.
    End,
}
290 | |||
291 | impl WatchThread { | ||
292 | fn new(options: &CheckOptions, workspace_root: &PathBuf) -> WatchThread { | ||
293 | let mut args: Vec<String> = vec![ | ||
294 | options.command.clone(), | ||
295 | "--message-format=json".to_string(), | ||
296 | "--manifest-path".to_string(), | ||
297 | format!("{}/Cargo.toml", workspace_root.to_string_lossy()), | ||
298 | ]; | ||
299 | if options.all_targets { | ||
300 | args.push("--all-targets".to_string()); | ||
301 | } | ||
302 | args.extend(options.args.iter().cloned()); | ||
303 | |||
304 | let (message_send, message_recv) = unbounded(); | ||
305 | let (cancel_send, cancel_recv) = unbounded(); | ||
306 | let enabled = options.enable; | ||
307 | std::thread::spawn(move || { | ||
308 | if !enabled { | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | let mut command = Command::new("cargo") | ||
313 | .args(&args) | ||
314 | .stdout(Stdio::piped()) | ||
315 | .stderr(Stdio::null()) | ||
316 | .spawn() | ||
317 | .expect("couldn't launch cargo"); | ||
318 | |||
319 | message_send.send(CheckEvent::Begin).unwrap(); | ||
320 | for message in cargo_metadata::parse_messages(command.stdout.take().unwrap()) { | ||
321 | match cancel_recv.try_recv() { | ||
322 | Ok(()) | Err(TryRecvError::Disconnected) => { | ||
323 | command.kill().expect("couldn't kill command"); | ||
324 | } | ||
325 | Err(TryRecvError::Empty) => (), | ||
326 | } | ||
327 | |||
328 | message_send.send(CheckEvent::Msg(message.unwrap())).unwrap(); | ||
329 | } | ||
330 | message_send.send(CheckEvent::End).unwrap(); | ||
331 | }); | ||
332 | WatchThread { message_recv, cancel_send } | ||
333 | } | ||
334 | |||
335 | fn cancel(&self) { | ||
336 | let _ = self.cancel_send.send(()); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | fn are_diagnostics_equal(left: &Diagnostic, right: &Diagnostic) -> bool { | ||
341 | left.source == right.source | ||
342 | && left.severity == right.severity | ||
343 | && left.range == right.range | ||
344 | && left.message == right.message | ||
345 | } | ||