Diffstat (limited to 'crates/ra_lsp_server/src/main_loop.rs')
-rw-r--r--  crates/ra_lsp_server/src/main_loop.rs  43
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/crates/ra_lsp_server/src/main_loop.rs b/crates/ra_lsp_server/src/main_loop.rs
index 7822be2e2..15bf519c9 100644
--- a/crates/ra_lsp_server/src/main_loop.rs
+++ b/crates/ra_lsp_server/src/main_loop.rs
@@ -29,9 +29,6 @@ use crate::{
     Result, ServerConfig,
 };
 
-const THREADPOOL_SIZE: usize = 8;
-const MAX_IN_FLIGHT_LIBS: usize = THREADPOOL_SIZE - 3;
-
 #[derive(Debug)]
 pub struct LspError {
     pub code: i32,
@@ -60,6 +57,25 @@ pub fn main_loop(
 ) -> Result<()> {
     log::info!("server_config: {:#?}", config);
 
+    // The Windows scheduler implements priority boosts: if a thread waits for an
+    // event (like a condvar) and the event fires, the priority of the thread is
+    // temporarily bumped. This optimization backfires in our case: each time the
+    // `main_loop` schedules a task to run on a threadpool, the worker threads
+    // get a higher priority and (on a machine with fewer cores) displace the
+    // main loop! We work around this by marking the main loop as a
+    // higher-priority thread.
+    //
+    // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
+    // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
+    // https://github.com/rust-analyzer/rust-analyzer/issues/2835
+    #[cfg(windows)]
+    unsafe {
+        use winapi::um::processthreadsapi::*;
+        let thread = GetCurrentThread();
+        let thread_priority_above_normal = 1;
+        SetThreadPriority(thread, thread_priority_above_normal);
+    }
+
     let mut loop_state = LoopState::default();
     let mut world_state = {
         let feature_flags = {
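Side note on the hunk above: the literal `1` is the value of Windows' THREAD_PRIORITY_ABOVE_NORMAL. A minimal sketch of the same workaround, assuming the winapi crate exposes that constant in winapi::um::winbase, and with the BOOL result of SetThreadPriority checked; the function name and the logging are illustrative, not part of the commit:

    #[cfg(windows)]
    fn raise_current_thread_priority() {
        // Assumption: the named constant equals the literal 1 used in the patch.
        use winapi::um::processthreadsapi::{GetCurrentThread, SetThreadPriority};
        use winapi::um::winbase::THREAD_PRIORITY_ABOVE_NORMAL;

        unsafe {
            // GetCurrentThread returns a pseudo-handle; it does not need CloseHandle.
            let thread = GetCurrentThread();
            if SetThreadPriority(thread, THREAD_PRIORITY_ABOVE_NORMAL as i32) == 0 {
                // Failure only costs us the priority bump, so just log it.
                log::warn!("failed to raise main loop thread priority");
            }
        }
    }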
@@ -168,7 +184,7 @@ pub fn main_loop(
         )
     };
 
-    let pool = ThreadPool::new(THREADPOOL_SIZE);
+    let pool = ThreadPool::default();
     let (task_sender, task_receiver) = unbounded::<Task>();
     let (libdata_sender, libdata_receiver) = unbounded::<LibraryData>();
 
@@ -210,7 +226,7 @@ pub fn main_loop(
             )?;
         }
     }
-
+    world_state.analysis_host.request_cancellation();
     log::info!("waiting for tasks to finish...");
     task_receiver.into_iter().for_each(|task| {
         on_task(task, &connection.sender, &mut loop_state.pending_requests, &mut world_state)
@@ -371,7 +387,8 @@ fn loop_turn(
         loop_state.pending_libraries.extend(changes);
     }
 
-    while loop_state.in_flight_libraries < MAX_IN_FLIGHT_LIBS
+    let max_in_flight_libs = pool.max_count().saturating_sub(2).max(1);
+    while loop_state.in_flight_libraries < max_in_flight_libs
         && !loop_state.pending_libraries.is_empty()
    {
         let (root, files) = loop_state.pending_libraries.pop().unwrap();
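To illustrate the clamping arithmetic introduced above: pool.max_count() reports the pool's worker count (with ThreadPool::default() that is, as far as I know, one worker per logical CPU), a couple of workers are kept free for latency-sensitive tasks, and at least one slot is always left for library loading. A self-contained sketch with a hypothetical helper name:

    /// Hypothetical helper mirroring `pool.max_count().saturating_sub(2).max(1)`.
    fn max_in_flight_libs(pool_size: usize) -> usize {
        pool_size.saturating_sub(2).max(1)
    }

    fn main() {
        assert_eq!(max_in_flight_libs(8), 6);
        assert_eq!(max_in_flight_libs(2), 1); // saturating_sub avoids underflow
        assert_eq!(max_in_flight_libs(1), 1); // library loading is never starved
    }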
@@ -586,12 +603,14 @@ fn on_notification(
 
 fn on_check_task(
     task: CheckTask,
-    world_state: &WorldState,
+    world_state: &mut WorldState,
     task_sender: &Sender<Task>,
 ) -> Result<()> {
     match task {
         CheckTask::ClearDiagnostics => {
-            let cleared_files = world_state.check_watcher.state.write().clear();
+            let state = Arc::get_mut(&mut world_state.check_watcher.state)
+                .expect("couldn't get check watcher state as mutable");
+            let cleared_files = state.clear();
 
             // Send updated diagnostics for each cleared file
             for url in cleared_files {
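The Arc::get_mut call used here (and again in the next hunk) only yields a mutable reference while no other Arc or Weak handles to the watcher state exist, which is why the signature now takes &mut WorldState. A minimal sketch of that behaviour, with illustrative types rather than the actual rust-analyzer ones:

    use std::sync::Arc;

    #[derive(Default)]
    struct CheckState {
        diagnostics: Vec<String>,
    }

    fn main() {
        let mut state = Arc::new(CheckState::default());

        // With exactly one reference, get_mut yields &mut CheckState.
        Arc::get_mut(&mut state)
            .expect("sole owner, so mutable access succeeds")
            .diagnostics
            .push("example".to_string());

        // Once the Arc is cloned (e.g. handed to a worker thread),
        // get_mut returns None and the expect above would panic.
        let shared = Arc::clone(&state);
        assert!(Arc::get_mut(&mut state).is_none());
        drop(shared);
    }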
@@ -600,11 +619,9 @@ fn on_check_task(
         }
 
         CheckTask::AddDiagnostic(url, diagnostic) => {
-            world_state
-                .check_watcher
-                .state
-                .write()
-                .add_diagnostic_with_fixes(url.clone(), diagnostic);
+            let state = Arc::get_mut(&mut world_state.check_watcher.state)
+                .expect("couldn't get check watcher state as mutable");
+            state.add_diagnostic_with_fixes(url.clone(), diagnostic);
 
             // We manually send a diagnostic update when the watcher asks
             // us to, to avoid the issue of having to change the file to