aboutsummaryrefslogtreecommitdiff
path: root/crates
diff options
context:
space:
mode:
authorbors[bot] <26634292+bors[bot]@users.noreply.github.com>2020-11-02 17:12:08 +0000
committerGitHub <[email protected]>2020-11-02 17:12:08 +0000
commiteb4e84ff510b9e5ead529dba83fc1679a569c448 (patch)
treef39809991b96a63e43c8bab5b8a0a0479e97293c /crates
parent4e80002aceadcff666f800d8bea7bd6e3d977946 (diff)
parenta968cb6aef24179e0c0a7f3d261fbec14e2c00d7 (diff)
Merge #6441
6441: Coalesce prime_caches updates r=matklad a=jonas-schievink This reduces the number of progress bar updates we send to the client by collapsing subsequent updates into one. This doesn't work as well as I'd hoped (which is that we end up sending *no* updates, or only `start` and `end`, when the cache is already fresh), but it does reduce the number considerably: instead of ~720 updates on the rust-analyzer codebase, we now only send ~60. It uses the same approach that is already in use for coalescing VFS events. Hopefully this is enough to fix https://github.com/rust-analyzer/rust-analyzer/issues/6413. Co-authored-by: Jonas Schievink <[email protected]>
Diffstat (limited to 'crates')
-rw-r--r--crates/rust-analyzer/src/main_loop.rs43
1 file changed, 34 insertions, 9 deletions
diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 53f8ca194..4ab5bc6eb 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -190,15 +190,35 @@ impl GlobalState {
190 } 190 }
191 lsp_server::Message::Response(resp) => self.complete_request(resp), 191 lsp_server::Message::Response(resp) => self.complete_request(resp),
192 }, 192 },
193 Event::Task(task) => match task { 193 Event::Task(mut task) => {
194 Task::Response(response) => self.respond(response), 194 let _p = profile::span("GlobalState::handle_event/task");
195 Task::Diagnostics(diagnostics_per_file) => { 195 let mut prime_caches_started = false;
196 for (file_id, diagnostics) in diagnostics_per_file { 196 let mut prime_caches_progress = None;
197 self.diagnostics.set_native_diagnostics(file_id, diagnostics) 197 loop {
198 match task {
199 Task::Response(response) => self.respond(response),
200 Task::Diagnostics(diagnostics_per_file) => {
201 for (file_id, diagnostics) in diagnostics_per_file {
202 self.diagnostics.set_native_diagnostics(file_id, diagnostics)
203 }
204 }
205 Task::Workspaces(workspaces) => self.switch_workspaces(workspaces),
206 Task::PrimeCaches(progress) => {
207 if let PrimeCachesProgress::Started = progress {
208 prime_caches_started = true;
209 }
210
211 prime_caches_progress = Some(progress);
212 }
198 } 213 }
214 // Coalesce multiple task events into one loop turn
215 task = match self.task_pool.receiver.try_recv() {
216 Ok(task) => task,
217 Err(_) => break,
218 };
199 } 219 }
200 Task::Workspaces(workspaces) => self.switch_workspaces(workspaces), 220
201 Task::PrimeCaches(progress) => { 221 if let Some(progress) = prime_caches_progress {
202 let (state, message, fraction); 222 let (state, message, fraction);
203 match progress { 223 match progress {
204 PrimeCachesProgress::Started => { 224 PrimeCachesProgress::Started => {
@@ -218,9 +238,14 @@ impl GlobalState {
218 } 238 }
219 }; 239 };
220 240
221 self.report_progress("indexing", state, message, Some(fraction)); 241 if state != Progress::Begin && prime_caches_started {
242 // Progress indicator needs to be created first.
243 self.report_progress("indexing", Progress::Begin, None, Some(0.0));
244 }
245
246 self.report_progress("indexing", state, message.clone(), Some(fraction));
222 } 247 }
223 }, 248 }
224 Event::Vfs(mut task) => { 249 Event::Vfs(mut task) => {
225 let _p = profile::span("GlobalState::handle_event/vfs"); 250 let _p = profile::span("GlobalState::handle_event/vfs");
226 loop { 251 loop {