rust/crates/rust-analyzer/src/main_loop.rs

777 lines
35 KiB
Rust
Raw Normal View History

2020-02-18 05:33:16 -06:00
//! The main loop of `rust-analyzer` responsible for dispatching LSP
2020-02-18 05:25:26 -06:00
//! requests/replies and notifications back to the client.
use std::{
fmt,
2021-04-06 04:40:31 -05:00
sync::Arc,
time::{Duration, Instant},
};
2018-08-12 16:09:30 -05:00
use always_assert::always;
use crossbeam_channel::{select, Receiver};
use ide_db::base_db::{SourceDatabaseExt, VfsPath};
use lsp_server::{Connection, Notification, Request};
use lsp_types::notification::Notification as _;
use vfs::{ChangeKind, FileId};
2018-08-12 16:09:30 -05:00
2018-10-15 12:15:53 -05:00
use crate::{
2020-06-25 16:26:21 -05:00
config::Config,
2020-06-25 10:50:47 -05:00
dispatch::{NotificationDispatcher, RequestDispatcher},
2020-06-03 04:16:08 -05:00
from_proto,
2021-04-06 06:16:35 -05:00
global_state::{file_id_to_url, url_to_file_id, GlobalState},
2020-06-24 11:57:30 -05:00
handlers, lsp_ext,
2021-05-17 12:07:10 -05:00
lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
mem_docs::DocumentData,
2021-01-28 09:33:02 -06:00
reload::{BuildDataProgress, ProjectWorkspaceProgress},
Result,
2018-08-12 14:08:14 -05:00
};
/// Entry point of the server's event loop: builds a [`GlobalState`] from the
/// negotiated `config` and pumps LSP messages from `connection` until the
/// client disconnects or sends `exit`.
pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
    tracing::info!("initial config: {:#?}", config);

    // Windows scheduler implements priority boosts: if thread waits for an
    // event (like a condvar), and event fires, priority of the thread is
    // temporary bumped. This optimization backfires in our case: each time the
    // `main_loop` schedules a task to run on a threadpool, the worker threads
    // gets a higher priority, and (on a machine with fewer cores) displaces the
    // main loop! We work-around this by marking the main loop as a
    // higher-priority thread.
    //
    // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
    // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
    // https://github.com/rust-analyzer/rust-analyzer/issues/2835
    #[cfg(windows)]
    unsafe {
        use winapi::um::processthreadsapi::*;
        let thread = GetCurrentThread();
        // Raw value of `THREAD_PRIORITY_ABOVE_NORMAL`.
        let thread_priority_above_normal = 1;
        // SAFETY: `GetCurrentThread` returns the pseudo-handle for the
        // calling thread, which is always valid here, and
        // `SetThreadPriority` has no memory-safety preconditions.
        SetThreadPriority(thread, thread_priority_above_normal);
    }

    GlobalState::new(connection.sender, config).run(connection.receiver)
}
/// One unit of work for a single turn of the main loop, drawn from one of
/// the four sources that `GlobalState::next_event` selects over.
enum Event {
    /// A request/notification/response from the LSP client.
    Lsp(lsp_server::Message),
    /// A result produced by a job previously spawned on the task pool.
    Task(Task),
    /// Loaded file contents or loading progress from the VFS loader.
    Vfs(vfs::loader::Message),
    /// Diagnostics or progress from a running flycheck (`cargo check`).
    Flycheck(flycheck::Message),
}
2020-06-25 15:45:35 -05:00
/// Messages that background jobs on the task pool send back to the main loop.
#[derive(Debug)]
pub(crate) enum Task {
    /// A finished LSP response ready to be forwarded to the client.
    Response(lsp_server::Response),
    /// Freshly computed native diagnostics, grouped per file.
    Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
    /// Progress of the cache-priming (indexing) job.
    PrimeCaches(PrimeCachesProgress),
    /// Progress of discovering/loading project workspaces.
    FetchWorkspace(ProjectWorkspaceProgress),
    /// Progress of fetching build-script output / build data.
    FetchBuildData(BuildDataProgress),
}
/// Progress events emitted by the cache-priming background task.
#[derive(Debug)]
pub(crate) enum PrimeCachesProgress {
    Begin,
    Report(ide::PrimeCachesProgress),
    /// `cancelled` is true when priming was interrupted; the main loop
    /// re-requests the operation in that case.
    End { cancelled: bool },
}
impl fmt::Debug for Event {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
f.debug_struct("Notification").field("method", &not.method).finish()
};
match self {
Event::Lsp(lsp_server::Message::Notification(not)) => {
if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
|| notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
{
return debug_verbose_not(not, f);
}
}
2020-06-25 12:23:52 -05:00
Event::Task(Task::Response(resp)) => {
return f
.debug_struct("Response")
.field("id", &resp.id)
.field("error", &resp.error)
.finish();
}
_ => (),
}
match self {
Event::Lsp(it) => fmt::Debug::fmt(it, f),
Event::Task(it) => fmt::Debug::fmt(it, f),
Event::Vfs(it) => fmt::Debug::fmt(it, f),
Event::Flycheck(it) => fmt::Debug::fmt(it, f),
}
}
}
2020-06-25 10:14:11 -05:00
impl GlobalState {
    /// The loop proper: performs one-time startup work (workspace-discovery
    /// warning, dynamic `didSave` registration, initial workspace fetch) and
    /// then processes events until the client sends `exit`.
    fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
        // No projects were discovered and the user wants to hear about it:
        // surface the failure instead of silently idling.
        if self.config.linked_projects().is_empty()
            && self.config.detached_files().is_empty()
            && self.config.notifications().cargo_toml_not_found
        {
            self.show_message(
                lsp_types::MessageType::Error,
                "rust-analyzer failed to discover workspace".to_string(),
            );
        };

        // Dynamically register interest in `didSave` for Rust sources and
        // Cargo manifests/lockfiles, if the client supports it.
        if self.config.did_save_text_document_dynamic_registration() {
            let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
                // We re-read contents from the VFS, so the client need not
                // send the full text with the save notification.
                include_text: Some(false),
                text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
                    document_selector: Some(vec![
                        lsp_types::DocumentFilter {
                            language: None,
                            scheme: None,
                            pattern: Some("**/*.rs".into()),
                        },
                        lsp_types::DocumentFilter {
                            language: None,
                            scheme: None,
                            pattern: Some("**/Cargo.toml".into()),
                        },
                        lsp_types::DocumentFilter {
                            language: None,
                            scheme: None,
                            pattern: Some("**/Cargo.lock".into()),
                        },
                    ]),
                },
            };
            let registration = lsp_types::Registration {
                id: "textDocument/didSave".to_string(),
                method: "textDocument/didSave".to_string(),
                register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
            };
            self.send_request::<lsp_types::request::RegisterCapability>(
                lsp_types::RegistrationParams { registrations: vec![registration] },
                // Nothing to do with the client's acknowledgement.
                |_, _| (),
            );
        }

        // Kick off the initial workspace discovery before entering the loop.
        self.fetch_workspaces_queue.request_op();
        if self.fetch_workspaces_queue.should_start_op() {
            self.fetch_workspaces();
        }

        while let Some(event) = self.next_event(&inbox) {
            // `exit` is the only event that terminates the loop cleanly.
            if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
                if not.method == lsp_types::notification::Exit::METHOD {
                    return Ok(());
                }
            }
            self.handle_event(event)?
        }

        // `next_event` returned `None`: the client hung up without the
        // `shutdown`/`exit` handshake, which is a protocol violation.
        Err("client exited without proper shutdown sequence")?
    }
2020-06-25 10:14:11 -05:00
2020-07-23 03:26:56 -05:00
    /// Blocks until an event arrives from any of the four sources.
    ///
    /// Returns `None` only when the client channel (`inbox`) is closed; the
    /// internal channels are owned by `self` and are expected to outlive the
    /// loop, hence the `unwrap`s on their receive results.
    fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
        select! {
            recv(inbox) -> msg =>
                msg.ok().map(Event::Lsp),
            recv(self.task_pool.receiver) -> task =>
                Some(Event::Task(task.unwrap())),
            recv(self.loader.receiver) -> task =>
                Some(Event::Vfs(task.unwrap())),
            recv(self.flycheck_receiver) -> task =>
                Some(Event::Flycheck(task.unwrap())),
        }
    }
2020-06-25 16:26:21 -05:00
    /// Processes a single [`Event`], then runs the end-of-turn bookkeeping:
    /// applying VFS changes, restarting flychecks / cache priming when the
    /// server becomes quiescent, publishing changed diagnostics, starting
    /// queued operations, reporting server status, and flagging overly long
    /// loop turns.
    fn handle_event(&mut self, event: Event) -> Result<()> {
        let loop_start = Instant::now();
        // NOTE: don't count blocking select! call as a loop-turn time
        let _p = profile::span("GlobalState::handle_event");

        tracing::info!("handle_event({:?})", event);
        let task_queue_len = self.task_pool.handle.len();
        if task_queue_len > 0 {
            tracing::info!("task queue len: {}", task_queue_len);
        }

        // Remembered so the post-event phase can detect a transition into
        // the quiescent state.
        let was_quiescent = self.is_quiescent();
        match event {
            Event::Lsp(msg) => match msg {
                lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
                lsp_server::Message::Notification(not) => {
                    self.on_notification(not)?;
                }
                lsp_server::Message::Response(resp) => self.complete_request(resp),
            },
            Event::Task(mut task) => {
                let _p = profile::span("GlobalState::handle_event/task");
                // Prime-cache progress is buffered (and intermediate reports
                // collapsed) so only the latest state is reported per turn.
                let mut prime_caches_progress = Vec::new();
                loop {
                    match task {
                        Task::Response(response) => self.respond(response),
                        Task::Diagnostics(diagnostics_per_file) => {
                            for (file_id, diagnostics) in diagnostics_per_file {
                                self.diagnostics.set_native_diagnostics(file_id, diagnostics)
                            }
                        }
                        Task::PrimeCaches(progress) => match progress {
                            PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
                            PrimeCachesProgress::Report(_) => {
                                match prime_caches_progress.last_mut() {
                                    Some(last @ PrimeCachesProgress::Report(_)) => {
                                        // Coalesce subsequent update events.
                                        *last = progress;
                                    }
                                    _ => prime_caches_progress.push(progress),
                                }
                            }
                            PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
                        },
                        Task::FetchWorkspace(progress) => {
                            let (state, msg) = match progress {
                                ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
                                ProjectWorkspaceProgress::Report(msg) => {
                                    (Progress::Report, Some(msg))
                                }
                                ProjectWorkspaceProgress::End(workspaces) => {
                                    self.fetch_workspaces_queue.op_completed(workspaces);

                                    // `switch_workspaces` leaves `self.workspaces`
                                    // pointer-equal when nothing changed; use that
                                    // to decide whether build data must be re-fetched.
                                    let old = Arc::clone(&self.workspaces);
                                    self.switch_workspaces();
                                    let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);

                                    if self.config.run_build_scripts() && workspaces_updated {
                                        self.fetch_build_data_queue.request_op()
                                    }

                                    (Progress::End, None)
                                }
                            };

                            self.report_progress("Fetching", state, msg, None);
                        }
                        Task::FetchBuildData(progress) => {
                            let (state, msg) = match progress {
                                BuildDataProgress::Begin => (Some(Progress::Begin), None),
                                BuildDataProgress::Report(msg) => {
                                    (Some(Progress::Report), Some(msg))
                                }
                                BuildDataProgress::End(build_data_result) => {
                                    self.fetch_build_data_queue.op_completed(build_data_result);

                                    self.switch_workspaces();

                                    (Some(Progress::End), None)
                                }
                            };

                            if let Some(state) = state {
                                self.report_progress("Loading", state, msg, None);
                            }
                        }
                    }

                    // Coalesce multiple task events into one loop turn
                    task = match self.task_pool.receiver.try_recv() {
                        Ok(task) => task,
                        Err(_) => break,
                    };
                }

                // Flush the (coalesced) prime-cache progress to the client.
                for progress in prime_caches_progress {
                    let (state, message, fraction);
                    match progress {
                        PrimeCachesProgress::Begin => {
                            state = Progress::Begin;
                            message = None;
                            fraction = 0.0;
                        }
                        PrimeCachesProgress::Report(report) => {
                            state = Progress::Report;
                            message = Some(format!(
                                "{}/{} ({})",
                                report.n_done, report.n_total, report.on_crate
                            ));
                            fraction = Progress::fraction(report.n_done, report.n_total);
                        }
                        PrimeCachesProgress::End { cancelled } => {
                            state = Progress::End;
                            message = None;
                            fraction = 1.0;
                            self.prime_caches_queue.op_completed(());
                            // A cancelled priming run is rescheduled so the
                            // index eventually completes.
                            if cancelled {
                                self.prime_caches_queue.request_op();
                            }
                        }
                    };

                    self.report_progress("Indexing", state, message, Some(fraction));
                }
            }
            Event::Vfs(mut task) => {
                let _p = profile::span("GlobalState::handle_event/vfs");
                loop {
                    match task {
                        vfs::loader::Message::Loaded { files } => {
                            let vfs = &mut self.vfs.write().0;
                            for (path, contents) in files {
                                let path = VfsPath::from(path);
                                // Documents open in the editor take priority
                                // over what's on disk.
                                if !self.mem_docs.contains(&path) {
                                    vfs.set_file_contents(path, contents);
                                }
                            }
                        }
                        vfs::loader::Message::Progress { n_total, n_done, config_version } => {
                            always!(config_version <= self.vfs_config_version);

                            self.vfs_progress_config_version = config_version;
                            self.vfs_progress_n_total = n_total;
                            self.vfs_progress_n_done = n_done;

                            let state = if n_done == 0 {
                                Progress::Begin
                            } else if n_done < n_total {
                                Progress::Report
                            } else {
                                assert_eq!(n_done, n_total);
                                Progress::End
                            };
                            self.report_progress(
                                "Roots Scanned",
                                state,
                                Some(format!("{}/{}", n_done, n_total)),
                                Some(Progress::fraction(n_done, n_total)),
                            )
                        }
                    }
                    // Coalesce many VFS event into a single loop turn
                    task = match self.loader.receiver.try_recv() {
                        Ok(task) => task,
                        Err(_) => break,
                    }
                }
            }
            Event::Flycheck(mut task) => {
                let _p = profile::span("GlobalState::handle_event/flycheck");
                loop {
                    match task {
                        flycheck::Message::AddDiagnostic { workspace_root, diagnostic } => {
                            let diagnostics =
                                crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
                                    &self.config.diagnostics_map(),
                                    &diagnostic,
                                    &workspace_root,
                                );
                            for diag in diagnostics {
                                match url_to_file_id(&self.vfs.read().0, &diag.url) {
                                    Ok(file_id) => self.diagnostics.add_check_diagnostic(
                                        file_id,
                                        diag.diagnostic,
                                        diag.fixes,
                                    ),
                                    Err(err) => {
                                        tracing::error!(
                                            "File with cargo diagnostic not found in VFS: {}",
                                            err
                                        );
                                    }
                                };
                            }
                        }
                        flycheck::Message::Progress { id, progress } => {
                            let (state, message) = match progress {
                                flycheck::Progress::DidStart => {
                                    // A new check run invalidates all prior
                                    // check diagnostics.
                                    self.diagnostics.clear_check();
                                    (Progress::Begin, None)
                                }
                                flycheck::Progress::DidCheckCrate(target) => {
                                    (Progress::Report, Some(target))
                                }
                                flycheck::Progress::DidCancel => (Progress::End, None),
                                flycheck::Progress::DidFinish(result) => {
                                    if let Err(err) = result {
                                        tracing::error!("cargo check failed: {}", err)
                                    }
                                    (Progress::End, None)
                                }
                            };

                            // When we're running multiple flychecks, we have to include a disambiguator in
                            // the title, or the editor complains. Note that this is a user-facing string.
                            let title = if self.flycheck.len() == 1 {
                                "cargo check".to_string()
                            } else {
                                format!("cargo check (#{})", id + 1)
                            };
                            self.report_progress(&title, state, message, None);
                        }
                    }
                    // Coalesce many flycheck updates into a single loop turn
                    task = match self.flycheck_receiver.try_recv() {
                        Ok(task) => task,
                        Err(_) => break,
                    }
                }
            }
        }

        // --- End-of-turn bookkeeping ---
        let state_changed = self.process_changes();
        let memdocs_added_or_removed = self.mem_docs.take_changes();

        if self.is_quiescent() {
            // Transition into quiescence: restart checks and (re)prime caches.
            if !was_quiescent {
                for flycheck in &self.flycheck {
                    flycheck.update();
                }
                self.prime_caches_queue.request_op();
            }

            if !was_quiescent || state_changed {
                // Refresh semantic tokens if the client supports it.
                if self.config.semantic_tokens_refresh() {
                    self.semantic_tokens_cache.lock().clear();
                    self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
                }

                // Refresh code lens if the client supports it.
                if self.config.code_lens_refresh() {
                    self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
                }
            }

            if !was_quiescent || state_changed || memdocs_added_or_removed {
                if self.config.publish_diagnostics() {
                    self.update_diagnostics()
                }
            }
        }

        if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
            for file_id in diagnostic_changes {
                let db = self.analysis_host.raw_database();
                let source_root = db.file_source_root(file_id);
                if db.source_root(source_root).is_library {
                    // Only publish diagnostics for files in the workspace, not from crates.io deps
                    // or the sysroot.
                    // While theoretically these should never have errors, we have quite a few false
                    // positives particularly in the stdlib, and those diagnostics would stay around
                    // forever if we emitted them here.
                    continue;
                }

                let url = file_id_to_url(&self.vfs.read().0, file_id);
                let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
                // Attach the document version only for editor-owned files;
                // `None` otherwise.
                let version = from_proto::vfs_path(&url)
                    .map(|path| self.mem_docs.get(&path).map(|it| it.version))
                    .unwrap_or_default();

                self.send_notification::<lsp_types::notification::PublishDiagnostics>(
                    lsp_types::PublishDiagnosticsParams { uri: url, diagnostics, version },
                );
            }
        }

        // Start any operations that became due during this turn.
        if self.config.cargo_autoreload() {
            if self.fetch_workspaces_queue.should_start_op() {
                self.fetch_workspaces();
            }
        }
        if self.fetch_build_data_queue.should_start_op() {
            self.fetch_build_data();
        }
        if self.prime_caches_queue.should_start_op() {
            self.task_pool.handle.spawn_with_sender({
                let analysis = self.snapshot().analysis;
                move |sender| {
                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
                    let res = analysis.prime_caches(|progress| {
                        let report = PrimeCachesProgress::Report(progress);
                        sender.send(Task::PrimeCaches(report)).unwrap();
                    });
                    // `Err` here means the analysis was cancelled mid-run.
                    sender
                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
                            cancelled: res.is_err(),
                        }))
                        .unwrap();
                }
            });
        }

        // Report server status, but only when it actually changed.
        let status = self.current_status();
        if self.last_reported_status.as_ref() != Some(&status) {
            self.last_reported_status = Some(status.clone());

            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
                self.show_message(lsp_types::MessageType::Error, message.clone());
            }

            if self.config.server_status_notification() {
                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
            }
        }

        // A slow turn blocks the whole server; make it loud in dev builds.
        let loop_duration = loop_start.elapsed();
        if loop_duration > Duration::from_millis(100) {
            tracing::warn!("overly long loop turn: {:?}", loop_duration);
            self.poke_rust_analyzer_developer(format!(
                "overly long loop turn: {:?}",
                loop_duration
            ));
        }
        Ok(())
    }
    /// Handles a single client request: registers it (for latency tracking
    /// and cancellation), rejects it when shutdown has been requested or the
    /// server hasn't loaded any workspace yet, then dispatches it to the
    /// matching handler.
    fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()> {
        self.register_request(&req, request_received);

        // Per the LSP spec, every request after `shutdown` is invalid.
        if self.shutdown_requested {
            self.respond(lsp_server::Response::new_err(
                req.id,
                lsp_server::ErrorCode::InvalidRequest as i32,
                "Shutdown already requested.".to_owned(),
            ));
            return Ok(());
        }

        // Avoid flashing a bunch of unresolved references during initial load.
        if self.workspaces.is_empty() && !self.is_quiescent() {
            self.respond(lsp_server::Response::new_err(
                req.id,
                // FIXME: i32 should impl From<ErrorCode> (from() guarantees lossless conversion)
                lsp_server::ErrorCode::ContentModified as i32,
                "waiting for cargo metadata or cargo check".to_owned(),
            ));
            return Ok(());
        }

        // NOTE(review): `on_sync_mut` handlers get `&mut GlobalState` while
        // `on_sync`/`on` handlers do not — presumably sync handlers run on
        // the main thread and `on` handlers on the task pool; confirm in
        // `dispatch.rs`.
        RequestDispatcher { req: Some(req), global_state: self }
            .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
                s.fetch_workspaces_queue.request_op();
                Ok(())
            })?
            .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
                s.shutdown_requested = true;
                Ok(())
            })?
            .on_sync_mut::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
            .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
            .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
            .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
            .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)?
            .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
            .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
            .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
            .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
            .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
            .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
            .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
            .on::<lsp_ext::Runnables>(handlers::handle_runnables)
            .on::<lsp_ext::RelatedTests>(handlers::handle_related_tests)
            .on::<lsp_ext::InlayHints>(handlers::handle_inlay_hints)
            .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
            .on::<lsp_ext::CodeActionResolveRequest>(handlers::handle_code_action_resolve)
            .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
            .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
            .on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
            .on::<lsp_ext::MoveItem>(handlers::handle_move_item)
            .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
            .on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
            .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
            .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
            .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
            .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
            .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
            .on::<lsp_types::request::Completion>(handlers::handle_completion)
            .on::<lsp_types::request::ResolveCompletionItem>(handlers::handle_completion_resolve)
            .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
            .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
            .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
            .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
            .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
            .on::<lsp_types::request::Rename>(handlers::handle_rename)
            .on::<lsp_types::request::References>(handlers::handle_references)
            .on::<lsp_types::request::Formatting>(handlers::handle_formatting)
            .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
            .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
            .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
            .on::<lsp_types::request::CallHierarchyIncomingCalls>(
                handlers::handle_call_hierarchy_incoming,
            )
            .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
                handlers::handle_call_hierarchy_outgoing,
            )
            .on::<lsp_types::request::SemanticTokensFullRequest>(
                handlers::handle_semantic_tokens_full,
            )
            .on::<lsp_types::request::SemanticTokensFullDeltaRequest>(
                handlers::handle_semantic_tokens_full_delta,
            )
            .on::<lsp_types::request::SemanticTokensRangeRequest>(
                handlers::handle_semantic_tokens_range,
            )
            .on::<lsp_types::request::WillRenameFiles>(handlers::handle_will_rename_files)
            .on::<lsp_ext::Ssr>(handlers::handle_ssr)
            .finish();
        Ok(())
    }
    /// Dispatches a single client notification to the matching handler.
    /// Unknown notifications fall through to `finish()` and are ignored.
    fn on_notification(&mut self, not: Notification) -> Result<()> {
        NotificationDispatcher { not: Some(not), global_state: self }
            .on::<lsp_types::notification::Cancel>(|this, params| {
                // LSP allows numeric or string request ids.
                let id: lsp_server::RequestId = match params.id {
                    lsp_types::NumberOrString::Number(id) => id.into(),
                    lsp_types::NumberOrString::String(id) => id.into(),
                };
                this.cancel(id);
                Ok(())
            })?
            .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
                // Just ignore this. It is OK to continue sending progress
                // notifications for this token, as the client can't know when
                // we accepted notification.
                Ok(())
            })?
            .on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    // Track the document as editor-owned; from now on the
                    // editor, not the disk, is the source of truth for it.
                    if this
                        .mem_docs
                        .insert(path.clone(), DocumentData::new(params.text_document.version))
                        .is_err()
                    {
                        tracing::error!("duplicate DidOpenTextDocument: {}", path)
                    }
                    this.vfs
                        .write()
                        .0
                        .set_file_contents(path, Some(params.text_document.text.into_bytes()));
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    match this.mem_docs.get_mut(&path) {
                        Some(doc) => {
                            // The version passed in DidChangeTextDocument is the version after all edits are applied
                            // so we should apply it before the vfs is notified.
                            doc.version = params.text_document.version;
                        }
                        None => {
                            // Change for a document we never saw opened.
                            tracing::error!("expected DidChangeTextDocument: {}", path);
                            return Ok(());
                        }
                    };

                    // Apply the incremental edits to the current VFS text.
                    let vfs = &mut this.vfs.write().0;
                    let file_id = vfs.file_id(&path).unwrap();
                    let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
                    apply_document_changes(&mut text, params.content_changes);

                    vfs.set_file_contents(path, Some(text.into_bytes()));
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    if this.mem_docs.remove(&path).is_err() {
                        tracing::error!("orphan DidCloseTextDocument: {}", path);
                    }

                    this.semantic_tokens_cache.lock().remove(&params.text_document.uri);

                    // Re-read the file from disk, which may differ from the
                    // editor buffer we just dropped.
                    if let Some(path) = path.as_path() {
                        this.loader.handle.invalidate(path.to_path_buf());
                    }
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
                // A save is the trigger for re-running `cargo check`.
                for flycheck in &this.flycheck {
                    flycheck.update();
                }
                if let Ok(abs_path) = from_proto::abs_path(&params.text_document.uri) {
                    this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidChangeConfiguration>(|this, _params| {
                // As stated in https://github.com/microsoft/language-server-protocol/issues/676,
                // this notification's parameters should be ignored and the actual config queried separately.
                this.send_request::<lsp_types::request::WorkspaceConfiguration>(
                    lsp_types::ConfigurationParams {
                        items: vec![lsp_types::ConfigurationItem {
                            scope_uri: None,
                            section: Some("rust-analyzer".to_string()),
                        }],
                    },
                    |this, resp| {
                        // NOTE(review): the log message below has a stray
                        // `'` (unbalanced quote) — harmless, looks like a typo.
                        tracing::debug!("config update response: '{:?}", resp);
                        let lsp_server::Response { error, result, .. } = resp;

                        match (error, result) {
                            (Some(err), _) => {
                                tracing::error!("failed to fetch the server settings: {:?}", err)
                            }
                            (None, Some(mut configs)) => {
                                if let Some(json) = configs.get_mut(0) {
                                    // Note that json can be null according to the spec if the client can't
                                    // provide a configuration. This is handled in Config::update below.
                                    let mut config = Config::clone(&*this.config);
                                    config.update(json.take());
                                    this.update_configuration(config);
                                }
                            }
                            (None, None) => tracing::error!(
                                "received empty server settings response from the client"
                            ),
                        }
                    },
                );

                Ok(())
            })?
            .on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
                // The watcher told us something changed on disk; reload it.
                for change in params.changes {
                    if let Ok(path) = from_proto::abs_path(&change.uri) {
                        this.loader.handle.invalidate(path);
                    }
                }
                Ok(())
            })?
            .finish();
        Ok(())
    }
fn update_diagnostics(&mut self) {
let subscriptions = self
.mem_docs
.iter()
2021-06-12 22:54:16 -05:00
.map(|path| self.vfs.read().0.file_id(path).unwrap())
.collect::<Vec<_>>();
2021-08-15 07:46:13 -05:00
tracing::trace!("updating notifications for {:?}", subscriptions);
let snapshot = self.snapshot();
self.task_pool.handle.spawn(move || {
let diagnostics = subscriptions
.into_iter()
.filter_map(|file_id| {
handlers::publish_diagnostics(&snapshot, file_id)
.map_err(|err| {
if !is_cancelled(&*err) {
2021-08-15 07:46:13 -05:00
tracing::error!("failed to compute diagnostics: {:?}", err);
}
()
})
.ok()
.map(|diags| (file_id, diags))
})
.collect::<Vec<_>>();
Task::Diagnostics(diagnostics)
})
2019-08-31 06:47:37 -05:00
}
2018-09-01 09:40:45 -05:00
}