internal: make scheduling control flow more obvious
There should be only one place where we need to check if we want to start background activities.
parent a59f344c4f
commit 9e0203bd69
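The pattern the diff converges on is an op queue: callers only record that an operation is wanted (`request_op()`), a single place in the main loop asks `should_start_op()` and actually starts the work, and `op_completed()` re-arms the queue once the background task reports back. The sketch below is illustrative only — a simplified stand-in, not rust-analyzer's actual `OpQueue` type; the three method names and the fact that completion carries the operation's result are taken from the diff, everything else is assumed.

```rust
// A minimal, self-contained sketch of an op queue with the semantics the
// commit relies on: many request sites, exactly one start site.
#[derive(Default)]
struct OpQueue<Output> {
    requested: bool,
    in_progress: bool,
    last_result: Option<Output>,
}

impl<Output> OpQueue<Output> {
    /// Record that somebody wants the operation to run.
    fn request_op(&mut self) {
        self.requested = true;
    }

    /// True at most once per request, and never while a previous run is still
    /// in flight — the single place that decides to start background work.
    fn should_start_op(&mut self) -> bool {
        if self.in_progress || !self.requested {
            return false;
        }
        self.requested = false;
        self.in_progress = true;
        true
    }

    /// Called when the background task reports back with its result.
    fn op_completed(&mut self, result: Output) {
        self.in_progress = false;
        self.last_result = Some(result);
    }
}

fn main() {
    let mut fetch_workspaces_queue: OpQueue<Vec<String>> = OpQueue::default();

    // Any number of places may *request* a workspace reload...
    fetch_workspaces_queue.request_op(); // e.g. the configuration changed
    fetch_workspaces_queue.request_op(); // e.g. a Cargo.toml was saved

    // ...but only one spot in the main loop decides whether to *start* it.
    if fetch_workspaces_queue.should_start_op() {
        println!("will fetch workspaces");
    }

    // While the operation is in flight, further requests are queued, not started.
    fetch_workspaces_queue.request_op();
    assert!(!fetch_workspaces_queue.should_start_op());

    // Completion re-arms the queue, so the pending request starts on the next turn.
    fetch_workspaces_queue.op_completed(vec!["example workspace".to_string()]);
    assert!(fetch_workspaces_queue.should_start_op());
    println!("last result: {:?}", fetch_workspaces_queue.last_result);
}
```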
@@ -152,8 +152,10 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
            );
        }

-       self.fetch_workspaces_request();
-       self.fetch_workspaces_if_needed();
+       self.fetch_workspaces_queue.request_op();
+       if self.fetch_workspaces_queue.should_start_op() {
+           self.fetch_workspaces();
+       }

        while let Some(event) = self.next_event(&inbox) {
            if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@@ -234,14 +236,14 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                        (Progress::Report, Some(msg))
                    }
                    ProjectWorkspaceProgress::End(workspaces) => {
-                       self.fetch_workspaces_completed(workspaces);
+                       self.fetch_workspaces_queue.op_completed(workspaces);

                        let old = Arc::clone(&self.workspaces);
                        self.switch_workspaces();
                        let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);

                        if self.config.run_build_scripts() && workspaces_updated {
-                           self.fetch_build_data_request()
+                           self.fetch_build_data_queue.request_op()
                        }

                        (Progress::End, None)
@@ -257,7 +259,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                        (Some(Progress::Report), Some(msg))
                    }
                    BuildDataProgress::End(build_data_result) => {
-                       self.fetch_build_data_completed(build_data_result);
+                       self.fetch_build_data_queue.op_completed(build_data_result);

                        self.switch_workspaces();

@@ -425,7 +427,6 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
        }

        if !was_quiescent || state_changed {
-           // Ensure that only one cache priming task can run at a time
            self.prime_caches_queue.request_op();

            // Refresh semantic tokens if the client supports it.
@@ -473,10 +474,13 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
        }

        if self.config.cargo_autoreload() {
-           self.fetch_workspaces_if_needed();
+           if self.fetch_workspaces_queue.should_start_op() {
+               self.fetch_workspaces();
+           }
        }
-       self.fetch_build_data_if_needed();
-
+       if self.fetch_build_data_queue.should_start_op() {
+           self.fetch_build_data();
+       }
        if self.prime_caches_queue.should_start_op() {
            self.task_pool.handle.spawn_with_sender({
                let analysis = self.snapshot().analysis;
@@ -495,7 +499,18 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
            });
        }

-       self.report_new_status_if_needed();
+       let status = self.current_status();
+       if self.last_reported_status.as_ref() != Some(&status) {
+           self.last_reported_status = Some(status.clone());
+
+           if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+               self.show_message(lsp_types::MessageType::Error, message.clone());
+           }
+
+           if self.config.server_status_notification() {
+               self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+           }
+       }

        let loop_duration = loop_start.elapsed();
        if loop_duration > Duration::from_millis(100) {
@@ -534,8 +549,7 @@ fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()>

        RequestDispatcher { req: Some(req), global_state: self }
            .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
-               s.fetch_workspaces_request();
-               s.fetch_workspaces_if_needed();
+               s.fetch_workspaces_queue.request_op();
                Ok(())
            })?
            .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
@@ -47,7 +47,7 @@ pub(crate) fn update_configuration(&mut self, config: Config) {
            self.analysis_host.update_lru_capacity(self.config.lru_capacity());
        }
        if self.config.linked_projects() != old_config.linked_projects() {
-           self.fetch_workspaces_request()
+           self.fetch_workspaces_queue.request_op()
        } else if self.config.flycheck() != old_config.flycheck() {
            self.reload_flycheck();
        }
@@ -71,7 +71,7 @@ pub(crate) fn maybe_refresh(&mut self, changes: &[(AbsPathBuf, ChangeKind)]) {
                ", "
            )
        );
-       self.fetch_workspaces_request();
+       self.fetch_workspaces_queue.request_op();

        fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
            const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"];
@@ -109,7 +109,8 @@ fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
            false
        }
    }
-   pub(crate) fn report_new_status_if_needed(&mut self) {
+
+   pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
        let mut status = lsp_ext::ServerStatusParams {
            health: lsp_ext::Health::Ok,
            quiescent: self.is_quiescent(),
@@ -132,27 +133,10 @@ pub(crate) fn report_new_status_if_needed(&mut self) {
            status.health = lsp_ext::Health::Error;
            status.message = Some(error)
        }
-
-       if self.last_reported_status.as_ref() != Some(&status) {
-           self.last_reported_status = Some(status.clone());
-
-           if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
-               self.show_message(lsp_types::MessageType::Error, message.clone());
-           }
-
-           if self.config.server_status_notification() {
-               self.send_notification::<lsp_ext::ServerStatusNotification>(status);
-           }
-       }
+       status
    }

-   pub(crate) fn fetch_workspaces_request(&mut self) {
-       self.fetch_workspaces_queue.request_op()
-   }
-   pub(crate) fn fetch_workspaces_if_needed(&mut self) {
-       if !self.fetch_workspaces_queue.should_start_op() {
-           return;
-       }
+   pub(crate) fn fetch_workspaces(&mut self) {
        tracing::info!("will fetch workspaces");

        self.task_pool.handle.spawn_with_sender({
@@ -203,21 +187,8 @@ pub(crate) fn fetch_workspaces_if_needed(&mut self) {
            }
        });
    }
-   pub(crate) fn fetch_workspaces_completed(
-       &mut self,
-       workspaces: Vec<anyhow::Result<ProjectWorkspace>>,
-   ) {
-       self.fetch_workspaces_queue.op_completed(workspaces)
-   }
-
-   pub(crate) fn fetch_build_data_request(&mut self) {
-       self.fetch_build_data_queue.request_op();
-   }
-   pub(crate) fn fetch_build_data_if_needed(&mut self) {
-       if !self.fetch_build_data_queue.should_start_op() {
-           return;
-       }

+   pub(crate) fn fetch_build_data(&mut self) {
        let workspaces = Arc::clone(&self.workspaces);
        let config = self.config.cargo();
        self.task_pool.handle.spawn_with_sender(move |sender| {
@@ -236,12 +207,6 @@ pub(crate) fn fetch_build_data_if_needed(&mut self) {
            sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
        });
    }
-   pub(crate) fn fetch_build_data_completed(
-       &mut self,
-       build_data: (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>),
-   ) {
-       self.fetch_build_data_queue.op_completed(build_data)
-   }

    pub(crate) fn switch_workspaces(&mut self) {
        let _p = profile::span("GlobalState::switch_workspaces");
@@ -257,6 +257,25 @@ if idx >= len {

**Rationale:** it's useful to see the invariant relied upon by the rest of the function clearly spelled out.

+## Control Flow
+
+As a special case of the previous rule, do not hide control flow inside functions, push it to the caller:
+
+```rust
+// GOOD
+if cond {
+    f()
+}
+
+// BAD
+fn f() {
+    if !cond {
+        return;
+    }
+    ...
+}
+```
+
## Assertions

Assert liberally.
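The style-guide addition above is the rule the code change itself follows: the `*_if_needed` helpers hid a `should_start_op()` check behind an early return, and the commit moves that decision to the caller. The snippet below is a generic, self-contained illustration of the same idea together with the "assert liberally" point; all names (`should_process`, `process`, `process_if_needed`) are made up for the example and do not come from the repository.

```rust
fn main() {
    let pending = vec![1, -3, 7];

    // GOOD: the caller owns the control flow...
    for &item in &pending {
        if should_process(item) {
            process(item);
        }
    }
}

fn should_process(item: i32) -> bool {
    item > 0
}

fn process(item: i32) {
    // ...and the callee spells out the invariant it relies on.
    assert!(item > 0, "callers only pass positive items");
    println!("processing {item}");
}

// BAD (shown for contrast): the same check hidden behind an early return,
// invisible at the call site.
#[allow(dead_code)]
fn process_if_needed(item: i32) {
    if !should_process(item) {
        return;
    }
    println!("processing {item}");
}
```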