author    bors[bot] <26634292+bors[bot]@users.noreply.github.com>  2020-01-15 15:37:28 +0000
committer GitHub <[email protected]>  2020-01-15 15:37:28 +0000
commit    c0661ce7444223b0fff1f5d54adb41022ab788cb (patch)
tree      24aa00d87f5a90d10d0ec37178331ec1c3f9428a /crates/ra_lsp_server/src
parent    80136367682ced76fb1a74403435c742685620c8 (diff)
parent    7a8c6351bf50c1dcfb111be9f91da3c1f9cf2ec3 (diff)
Merge #2853
2853: Manage `cargo check` state updates in `main_loop` to reduce lock contention r=matklad a=kiljacken

State is now updated exclusively from `main_loop`, so several threads can no longer compete for the lock. Updates to the state are requested via the existing task channel. Also updates some naming to make slightly more sense.

Based upon an idea/suggestion from @matklad on Zulip:

> I think I've noticed at least something suspicious!
>
> In `WorldSnapshot`, we store an `Arc<RwLock<CheckWatcherSharedState>>`. We read-lock this lock in `handle_diagnostics`.
>
> Additionally, we `.write` this lock from the watcher thread in `CheckWatcherState::run`.
>
> I think in general this is less than ideal, because a diagnostics request can be blocked on another thread. I think it makes sense to architect this in a way which does not block.
>
> For that, we stop sharing the state between `ServerWorld` and `CheckWatcherState`. Instead, the watcher thread sends new diagnostics via a channel, and we accommodate those diagnostics into the server state in the main loop.
>
> So, instead of:
> ```rust
> struct Server {
>     diagnostics: Arc<Mutex<Vec<Diagnostics>>>,
> }
>
> struct Watcher {
>     diagnostics: Arc<Mutex<Vec<Diagnostics>>>,
> }
> ```
> we'll have something like this:
> ```rust
> struct Server {
>     // this bit now *owns* diagnostics
>     diagnostics: Vec<Diagnostics>,
> }
>
> struct Watcher {
>     diagnostics_sink: Sender<Vec<Diagnostics>>,
> }
> ```
> I am not sure this is the culprit of the slowness on Windows, but I think we should fix it, because it's very useful when all changes to the server's state can occur only via the main loop.
>
> Note how VFS is set up in a similar way: instead of modifying some global hash map with files, VFS sends a message to the main loop saying it has new files, and the main loop then incorporates the changes itself.
>
> Note that I think we'll still need some locks here, to share the state between `ServerWorld` and `WorldSnapshot`, but we won't actually be changing anything mid-snapshot.

Co-authored-by: Emil Lauridsen <[email protected]>
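Taken on its own, the shape described above is plain single-owner message passing: the watcher holds only a sending end, and the main loop is the one place that mutates server state. The following is a minimal, self-contained sketch of that shape, not the actual rust-analyzer code: the names (`Watcher`, `Server`, `Diagnostic`) are hypothetical, and it uses `std::sync::mpsc` where the real server uses `crossbeam_channel`.

```rust
// Minimal sketch of the ownership model described above (hypothetical names;
// std::sync::mpsc stands in for the crossbeam_channel the real server uses).
use std::sync::mpsc::{channel, Sender};
use std::thread;

#[derive(Debug)]
struct Diagnostic(String);

// The watcher only holds a sending end; it never touches server state directly.
struct Watcher {
    diagnostics_sink: Sender<Vec<Diagnostic>>,
}

// The server *owns* its diagnostics; only the main loop mutates them.
struct Server {
    diagnostics: Vec<Diagnostic>,
}

fn main() {
    let (tx, rx) = channel();
    let watcher = Watcher { diagnostics_sink: tx };

    // Watcher thread: produce diagnostics and send them over the channel.
    thread::spawn(move || {
        watcher
            .diagnostics_sink
            .send(vec![Diagnostic("unused variable `x`".into())])
            .unwrap();
    });

    // Main loop: the only place server state changes.
    let mut server = Server { diagnostics: Vec::new() };
    for batch in rx {
        server.diagnostics = batch;
        println!("updated diagnostics: {:?}", server.diagnostics);
    }
}
```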
Diffstat (limited to 'crates/ra_lsp_server/src')
-rw-r--r--  crates/ra_lsp_server/src/main_loop.rs  79
-rw-r--r--  crates/ra_lsp_server/src/world.rs        6
2 files changed, 59 insertions(+), 26 deletions(-)
diff --git a/crates/ra_lsp_server/src/main_loop.rs b/crates/ra_lsp_server/src/main_loop.rs
index 84012b99d..7822be2e2 100644
--- a/crates/ra_lsp_server/src/main_loop.rs
+++ b/crates/ra_lsp_server/src/main_loop.rs
@@ -9,7 +9,7 @@ use std::{error::Error, fmt, panic, path::PathBuf, sync::Arc, time::Instant};
 
 use crossbeam_channel::{select, unbounded, RecvError, Sender};
 use lsp_server::{Connection, ErrorCode, Message, Notification, Request, RequestId, Response};
-use lsp_types::{ClientCapabilities, NumberOrString};
+use lsp_types::{ClientCapabilities, NumberOrString, Url};
 use ra_cargo_watch::{CheckOptions, CheckTask};
 use ra_ide::{Canceled, FeatureFlags, FileId, LibraryData, SourceRootId};
 use ra_prof::profile;
@@ -336,28 +336,7 @@ fn loop_turn(
             world_state.maybe_collect_garbage();
             loop_state.in_flight_libraries -= 1;
         }
-        Event::CheckWatcher(task) => match task {
-            CheckTask::Update(uri) => {
-                // We manually send a diagnostic update when the watcher asks
-                // us to, to avoid the issue of having to change the file to
-                // receive updated diagnostics.
-                let path = uri.to_file_path().map_err(|()| format!("invalid uri: {}", uri))?;
-                if let Some(file_id) = world_state.vfs.read().path2file(&path) {
-                    let params =
-                        handlers::publish_diagnostics(&world_state.snapshot(), FileId(file_id.0))?;
-                    let not = notification_new::<req::PublishDiagnostics>(params);
-                    task_sender.send(Task::Notify(not)).unwrap();
-                }
-            }
-            CheckTask::Status(progress) => {
-                let params = req::ProgressParams {
-                    token: req::ProgressToken::String("rustAnalyzer/cargoWatcher".to_string()),
-                    value: req::ProgressParamsValue::WorkDone(progress),
-                };
-                let not = notification_new::<req::Progress>(params);
-                task_sender.send(Task::Notify(not)).unwrap();
-            }
-        },
+        Event::CheckWatcher(task) => on_check_task(task, world_state, task_sender)?,
         Event::Msg(msg) => match msg {
             Message::Request(req) => on_request(
                 world_state,
@@ -605,6 +584,60 @@ fn on_notification(
     Ok(())
 }
 
+fn on_check_task(
+    task: CheckTask,
+    world_state: &WorldState,
+    task_sender: &Sender<Task>,
+) -> Result<()> {
+    match task {
+        CheckTask::ClearDiagnostics => {
+            let cleared_files = world_state.check_watcher.state.write().clear();
+
+            // Send updated diagnostics for each cleared file
+            for url in cleared_files {
+                publish_diagnostics_for_url(&url, world_state, task_sender)?;
+            }
+        }
+
+        CheckTask::AddDiagnostic(url, diagnostic) => {
+            world_state
+                .check_watcher
+                .state
+                .write()
+                .add_diagnostic_with_fixes(url.clone(), diagnostic);
+
+            // We manually send a diagnostic update when the watcher asks
+            // us to, to avoid the issue of having to change the file to
+            // receive updated diagnostics.
+            publish_diagnostics_for_url(&url, world_state, task_sender)?;
+        }
+
+        CheckTask::Status(progress) => {
+            let params = req::ProgressParams {
+                token: req::ProgressToken::String("rustAnalyzer/cargoWatcher".to_string()),
+                value: req::ProgressParamsValue::WorkDone(progress),
+            };
+            let not = notification_new::<req::Progress>(params);
+            task_sender.send(Task::Notify(not)).unwrap();
+        }
+    }
+    Ok(())
+}
+
+fn publish_diagnostics_for_url(
+    url: &Url,
+    world_state: &WorldState,
+    task_sender: &Sender<Task>,
+) -> Result<()> {
+    let path = url.to_file_path().map_err(|()| format!("invalid uri: {}", url))?;
+    if let Some(file_id) = world_state.vfs.read().path2file(&path) {
+        let params = handlers::publish_diagnostics(&world_state.snapshot(), FileId(file_id.0))?;
+        let not = notification_new::<req::PublishDiagnostics>(params);
+        task_sender.send(Task::Notify(not)).unwrap();
+    }
+    Ok(())
+}
+
 struct PoolDispatcher<'a> {
     req: Option<Request>,
     pool: &'a ThreadPool,
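To make the routing in this diff easier to follow in isolation: the watcher thread emits `CheckTask` values, `loop_turn` forwards them to `on_check_task`, and `on_check_task` both updates the check state and queues notifications on `task_sender`. Below is a simplified, self-contained sketch of that flow. The types (`CheckTask`, `CheckState`, `Task`, `Url`) are pared-down stand-ins, not the real rust-analyzer definitions, and the notification queue is a plain `Vec` rather than a channel.

```rust
// Pared-down stand-ins for the real CheckTask / CheckState / Task types.
use std::collections::HashMap;

type Url = String;

enum CheckTask {
    ClearDiagnostics,
    AddDiagnostic(Url, String),
    Status(String),
}

enum Task {
    Notify(String),
}

#[derive(Default)]
struct CheckState {
    diagnostics: HashMap<Url, Vec<String>>,
}

impl CheckState {
    // Clearing returns the affected files so the caller can re-publish them.
    fn clear(&mut self) -> Vec<Url> {
        self.diagnostics.drain().map(|(url, _)| url).collect()
    }

    fn add_diagnostic(&mut self, url: Url, diag: String) {
        self.diagnostics.entry(url).or_default().push(diag);
    }
}

// Mirrors the on_check_task idea: all mutation happens here, on the main loop's thread.
fn on_check_task(task: CheckTask, state: &mut CheckState, task_sender: &mut Vec<Task>) {
    match task {
        CheckTask::ClearDiagnostics => {
            for url in state.clear() {
                task_sender.push(Task::Notify(format!("publishDiagnostics for {}", url)));
            }
        }
        CheckTask::AddDiagnostic(url, diag) => {
            state.add_diagnostic(url.clone(), diag);
            task_sender.push(Task::Notify(format!("publishDiagnostics for {}", url)));
        }
        CheckTask::Status(progress) => {
            task_sender.push(Task::Notify(format!("progress: {}", progress)));
        }
    }
}

fn main() {
    let mut state = CheckState::default();
    let mut queued = Vec::new();

    on_check_task(
        CheckTask::AddDiagnostic("file:///main.rs".into(), "unused import".into()),
        &mut state,
        &mut queued,
    );
    on_check_task(CheckTask::ClearDiagnostics, &mut state, &mut queued);
    on_check_task(CheckTask::Status("cargo check finished".into()), &mut state, &mut queued);

    for task in &queued {
        let Task::Notify(msg) = task;
        println!("queued notification: {}", msg);
    }
}
```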
diff --git a/crates/ra_lsp_server/src/world.rs b/crates/ra_lsp_server/src/world.rs
index c0175c726..7a3030a51 100644
--- a/crates/ra_lsp_server/src/world.rs
+++ b/crates/ra_lsp_server/src/world.rs
@@ -13,7 +13,7 @@ use lsp_server::ErrorCode;
 use lsp_types::Url;
 use parking_lot::RwLock;
 use ra_cargo_watch::{
-    url_from_path_with_drive_lowercasing, CheckOptions, CheckWatcher, CheckWatcherSharedState,
+    url_from_path_with_drive_lowercasing, CheckOptions, CheckState, CheckWatcher,
 };
 use ra_ide::{
     Analysis, AnalysisChange, AnalysisHost, CrateGraph, FeatureFlags, FileId, LibraryData,
@@ -64,7 +64,7 @@ pub struct WorldSnapshot {
     pub analysis: Analysis,
     pub vfs: Arc<RwLock<Vfs>>,
     pub latest_requests: Arc<RwLock<LatestRequests>>,
-    pub check_watcher: Arc<RwLock<CheckWatcherSharedState>>,
+    pub check_watcher: Arc<RwLock<CheckState>>,
 }
 
 impl WorldState {
@@ -220,7 +220,7 @@ impl WorldState {
             analysis: self.analysis_host.analysis(),
             vfs: Arc::clone(&self.vfs),
             latest_requests: Arc::clone(&self.latest_requests),
-            check_watcher: self.check_watcher.shared.clone(),
+            check_watcher: self.check_watcher.state.clone(),
         }
     }
 
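The `world.rs` side of the change keeps a single shared handle: `WorldState` holds an `Arc<RwLock<CheckState>>` and each `WorldSnapshot` clones the `Arc`, so request handlers only read the state while the main loop is the writer. A minimal sketch of that sharing pattern, using `std::sync::RwLock` in place of `parking_lot` and hypothetical field names:

```rust
use std::sync::{Arc, RwLock};

#[derive(Default)]
struct CheckState {
    diagnostics: Vec<String>,
}

// Owns the long-lived state; mutated only from the main loop.
struct WorldState {
    check_watcher_state: Arc<RwLock<CheckState>>,
}

// Handed to request handlers; shares the same state read-only in practice.
struct WorldSnapshot {
    check_watcher: Arc<RwLock<CheckState>>,
}

impl WorldState {
    // Snapshots share the state via Arc::clone rather than copying it.
    fn snapshot(&self) -> WorldSnapshot {
        WorldSnapshot { check_watcher: Arc::clone(&self.check_watcher_state) }
    }
}

fn main() {
    let world = WorldState { check_watcher_state: Arc::new(RwLock::new(CheckState::default())) };

    // Main loop incorporates a diagnostic update.
    world.check_watcher_state.write().unwrap().diagnostics.push("warning: unused variable".into());

    // A request handler reads through its snapshot without taking a write lock.
    let snap = world.snapshot();
    assert_eq!(snap.check_watcher.read().unwrap().diagnostics.len(), 1);
}
```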