//! The main loop of `rust-analyzer` responsible for dispatching LSP
//! requests/replies and notifications back to the client.
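//!
//! `GlobalState::run` multiplexes all inputs (LSP messages from the client,
//! results of background tasks, VFS loader messages and flycheck messages)
//! into a single stream of `Event`s and handles them one loop turn at a time.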
use std::{
    env, fmt, panic,
    time::{Duration, Instant},
};

use crossbeam_channel::{select, Receiver};
use ide::PrimeCachesProgress;
use ide::{Canceled, FileId};
use ide_db::base_db::VfsPath;
use lsp_server::{Connection, Notification, Request, Response};
use lsp_types::notification::Notification as _;
use project_model::ProjectWorkspace;
use vfs::ChangeKind;

use crate::{
    config::Config,
    dispatch::{NotificationDispatcher, RequestDispatcher},
    document::DocumentData,
    from_proto,
    global_state::{file_id_to_url, url_to_file_id, GlobalState, Status},
    handlers, lsp_ext,
    lsp_utils::{apply_document_changes, is_canceled, notification_is, Progress},
    Result,
};

pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
    log::info!("initial config: {:#?}", config);

    // The Windows scheduler implements priority boosts: if a thread waits for an
    // event (like a condvar) and the event fires, the thread's priority is
    // temporarily bumped. This optimization backfires in our case: each time the
    // `main_loop` schedules a task to run on a threadpool, the worker threads
    // get a higher priority, and (on a machine with fewer cores) displace the
    // main loop! We work around this by marking the main loop as a
    // higher-priority thread.
    //
    // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
    // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
    // https://github.com/rust-analyzer/rust-analyzer/issues/2835
    #[cfg(windows)]
    unsafe {
        use winapi::um::processthreadsapi::*;
        let thread = GetCurrentThread();
        let thread_priority_above_normal = 1;
        SetThreadPriority(thread, thread_priority_above_normal);
    }

    GlobalState::new(connection.sender, config).run(connection.receiver)
}

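/// A single item of input the main loop reacts to: an LSP message from the
/// client, a finished background task, a VFS loader message, or a flycheck
/// message.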
enum Event {
    Lsp(lsp_server::Message),
    Task(Task),
    Vfs(vfs::loader::Message),
    Flycheck(flycheck::Message),
}

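/// Work carried out off the main thread and sent back to the main loop to be
/// applied to `GlobalState`: responses, computed diagnostics, loaded
/// workspaces and cache-priming progress.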
#[derive(Debug)]
pub(crate) enum Task {
    Response(Response),
    Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
    Workspaces(Vec<anyhow::Result<ProjectWorkspace>>),
    PrimeCaches(PrimeCachesProgress),
}

impl fmt::Debug for Event {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
            f.debug_struct("Notification").field("method", &not.method).finish()
        };

        match self {
            Event::Lsp(lsp_server::Message::Notification(not)) => {
                if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
                    || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
                {
                    return debug_verbose_not(not, f);
                }
            }
            Event::Task(Task::Response(resp)) => {
                return f
                    .debug_struct("Response")
                    .field("id", &resp.id)
                    .field("error", &resp.error)
                    .finish();
            }
            _ => (),
        }
        match self {
            Event::Lsp(it) => fmt::Debug::fmt(it, f),
            Event::Task(it) => fmt::Debug::fmt(it, f),
            Event::Vfs(it) => fmt::Debug::fmt(it, f),
            Event::Flycheck(it) => fmt::Debug::fmt(it, f),
        }
    }
}

impl GlobalState {
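    /// Performs start-up work (dynamic `didSave` registration, initial
    /// workspace discovery), then processes events until the client sends `exit`.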
    fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
        if self.config.linked_projects.is_empty() && self.config.notifications.cargo_toml_not_found
        {
            self.show_message(
                lsp_types::MessageType::Error,
                "rust-analyzer failed to discover workspace".to_string(),
            );
        };

        let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
            include_text: Some(false),
            text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
                document_selector: Some(vec![
                    lsp_types::DocumentFilter {
                        language: None,
                        scheme: None,
                        pattern: Some("**/*.rs".into()),
                    },
                    lsp_types::DocumentFilter {
                        language: None,
                        scheme: None,
                        pattern: Some("**/Cargo.toml".into()),
                    },
                    lsp_types::DocumentFilter {
                        language: None,
                        scheme: None,
                        pattern: Some("**/Cargo.lock".into()),
                    },
                ]),
            },
        };

        let registration = lsp_types::Registration {
            id: "textDocument/didSave".to_string(),
            method: "textDocument/didSave".to_string(),
            register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
        };
        self.send_request::<lsp_types::request::RegisterCapability>(
            lsp_types::RegistrationParams { registrations: vec![registration] },
            |_, _| (),
        );

        self.fetch_workspaces();

        while let Some(event) = self.next_event(&inbox) {
            if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
                if not.method == lsp_types::notification::Exit::METHOD {
                    return Ok(());
                }
            }
            self.handle_event(event)?
        }

        Err("client exited without proper shutdown sequence")?
    }

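    /// Blocks until the next event arrives on any of the input channels.
    /// Returns `None` once the LSP channel is closed by the client.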
    fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
        select! {
            recv(inbox) -> msg =>
                msg.ok().map(Event::Lsp),

            recv(self.task_pool.receiver) -> task =>
                Some(Event::Task(task.unwrap())),

            recv(self.loader.receiver) -> task =>
                Some(Event::Vfs(task.unwrap())),

            recv(self.flycheck_receiver) -> task =>
                Some(Event::Flycheck(task.unwrap())),
        }
    }

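    /// Processes a single event (one loop turn): dispatches the event, publishes
    /// any changed diagnostics, and reports progress back to the client.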
    fn handle_event(&mut self, event: Event) -> Result<()> {
        let loop_start = Instant::now();
        // NOTE: don't count blocking select! call as a loop-turn time
        let _p = profile::span("GlobalState::handle_event");

        log::info!("handle_event({:?})", event);
        let task_queue_len = self.task_pool.handle.len();
        if task_queue_len > 0 {
            log::info!("task queue len: {}", task_queue_len);
        }

        let prev_status = self.status;
        match event {
            Event::Lsp(msg) => match msg {
                lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
                lsp_server::Message::Notification(not) => {
                    self.on_notification(not)?;
                }
                lsp_server::Message::Response(resp) => self.complete_request(resp),
            },
            Event::Task(mut task) => {
                let _p = profile::span("GlobalState::handle_event/task");
                let mut prime_caches_progress = Vec::new();
                loop {
                    match task {
                        Task::Response(response) => self.respond(response),
                        Task::Diagnostics(diagnostics_per_file) => {
                            for (file_id, diagnostics) in diagnostics_per_file {
                                self.diagnostics.set_native_diagnostics(file_id, diagnostics)
                            }
                        }
                        Task::Workspaces(workspaces) => self.switch_workspaces(workspaces),
                        Task::PrimeCaches(progress) => match progress {
                            PrimeCachesProgress::Started => prime_caches_progress.push(progress),
                            PrimeCachesProgress::StartedOnCrate { .. } => {
                                match prime_caches_progress.last_mut() {
                                    Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
                                        // Coalesce subsequent update events.
                                        *last = progress;
                                    }
                                    _ => prime_caches_progress.push(progress),
                                }
                            }
                            PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
                        },
                    }
                    // Coalesce multiple task events into one loop turn
                    task = match self.task_pool.receiver.try_recv() {
                        Ok(task) => task,
                        Err(_) => break,
                    };
                }

                for progress in prime_caches_progress {
                    let (state, message, fraction);
                    match progress {
                        PrimeCachesProgress::Started => {
                            state = Progress::Begin;
                            message = None;
                            fraction = 0.0;
                        }
                        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
                            state = Progress::Report;
                            message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
                            fraction = Progress::fraction(n_done, n_total);
                        }
                        PrimeCachesProgress::Finished => {
                            state = Progress::End;
                            message = None;
                            fraction = 1.0;
                        }
                    };

                    self.report_progress("indexing", state, message, Some(fraction));
                }
            }
            Event::Vfs(mut task) => {
                let _p = profile::span("GlobalState::handle_event/vfs");
                loop {
                    match task {
                        vfs::loader::Message::Loaded { files } => {
                            let vfs = &mut self.vfs.write().0;
                            for (path, contents) in files {
                                let path = VfsPath::from(path);
                                if !self.mem_docs.contains_key(&path) {
                                    vfs.set_file_contents(path, contents)
                                }
                            }
                        }
                        vfs::loader::Message::Progress { n_total, n_done } => {
                            if n_total == 0 {
                                self.transition(Status::Invalid);
                            } else {
                                let state = if n_done == 0 {
                                    self.transition(Status::Loading);
                                    Progress::Begin
                                } else if n_done < n_total {
                                    Progress::Report
                                } else {
                                    assert_eq!(n_done, n_total);
                                    self.transition(Status::Ready);
                                    Progress::End
                                };
                                self.report_progress(
                                    "roots scanned",
                                    state,
                                    Some(format!("{}/{}", n_done, n_total)),
                                    Some(Progress::fraction(n_done, n_total)),
                                )
                            }
                        }
                    }
                    // Coalesce many VFS events into a single loop turn
                    task = match self.loader.receiver.try_recv() {
                        Ok(task) => task,
                        Err(_) => break,
                    }
                }
            }
            Event::Flycheck(task) => match task {
                flycheck::Message::AddDiagnostic { workspace_root, diagnostic } => {
                    let diagnostics = crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
                        &self.config.diagnostics_map,
                        &diagnostic,
                        &workspace_root,
                    );
                    for diag in diagnostics {
                        match url_to_file_id(&self.vfs.read().0, &diag.url) {
                            Ok(file_id) => self.diagnostics.add_check_diagnostic(
                                file_id,
                                diag.diagnostic,
                                diag.fixes,
                            ),
                            Err(err) => {
                                log::error!("File with cargo diagnostic not found in VFS: {}", err);
                            }
                        };
                    }
                }

                flycheck::Message::Progress { id, progress } => {
                    let (state, message) = match progress {
                        flycheck::Progress::DidStart => {
                            self.diagnostics.clear_check();
                            (Progress::Begin, None)
                        }
                        flycheck::Progress::DidCheckCrate(target) => {
                            (Progress::Report, Some(target))
                        }
                        flycheck::Progress::DidCancel => (Progress::End, None),
                        flycheck::Progress::DidFinish(result) => {
                            if let Err(err) = result {
                                log::error!("cargo check failed: {}", err)
                            }
                            (Progress::End, None)
                        }
                    };

                    // When we're running multiple flychecks, we have to include a disambiguator in
                    // the title, or the editor complains. Note that this is a user-facing string.
                    let title = if self.flycheck.len() == 1 {
                        "cargo check".to_string()
                    } else {
                        format!("cargo check (#{})", id + 1)
                    };
                    self.report_progress(&title, state, message, None);
                }
            },
        }

        let state_changed = self.process_changes();
        if prev_status == Status::Loading && self.status == Status::Ready {
            for flycheck in &self.flycheck {
                flycheck.update();
            }
        }

        if self.status == Status::Ready && (state_changed || prev_status == Status::Loading) {
            let subscriptions = self
                .mem_docs
                .keys()
                .map(|path| self.vfs.read().0.file_id(&path).unwrap())
                .collect::<Vec<_>>();

            self.update_file_notifications_on_threadpool(subscriptions);

            // Refresh semantic tokens if the client supports it.
            if self.config.semantic_tokens_refresh {
                self.semantic_tokens_cache.lock().clear();
                self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
            }
        }

        if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
            for file_id in diagnostic_changes {
                let url = file_id_to_url(&self.vfs.read().0, file_id);
                let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
                let version = from_proto::vfs_path(&url)
                    .map(|path| self.mem_docs.get(&path)?.version)
                    .unwrap_or_default();

                self.send_notification::<lsp_types::notification::PublishDiagnostics>(
                    lsp_types::PublishDiagnosticsParams { uri: url, diagnostics, version },
                );
            }
        }

        let loop_duration = loop_start.elapsed();
        if loop_duration > Duration::from_millis(100) {
            log::warn!("overly long loop turn: {:?}", loop_duration);
            if env::var("RA_PROFILE").is_ok() {
                self.show_message(
                    lsp_types::MessageType::Error,
                    format!("overly long loop turn: {:?}", loop_duration),
                )
            }
        }
        Ok(())
    }

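    /// Routes an LSP request either to a handler that runs synchronously on the
    /// main thread (`on_sync`) or to one that runs on the thread pool (`on`).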
    fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()> {
        self.register_request(&req, request_received);

        if self.shutdown_requested {
            self.respond(Response::new_err(
                req.id,
                lsp_server::ErrorCode::InvalidRequest as i32,
                "Shutdown already requested.".to_owned(),
            ));

            return Ok(());
        }

        if self.status == Status::Loading && req.method != "shutdown" {
            self.respond(lsp_server::Response::new_err(
                req.id,
                // FIXME: i32 should impl From<ErrorCode> (from() guarantees lossless conversion)
                lsp_server::ErrorCode::ContentModified as i32,
                "Rust Analyzer is still loading...".to_owned(),
            ));
            return Ok(());
        }

        RequestDispatcher { req: Some(req), global_state: self }
            .on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| Ok(s.fetch_workspaces()))?
            .on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
            .on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
            .on_sync::<lsp_types::request::Shutdown>(|s, ()| {
                s.shutdown_requested = true;
                Ok(())
            })?
            .on_sync::<lsp_types::request::SelectionRangeRequest>(|s, p| {
                handlers::handle_selection_range(s.snapshot(), p)
            })?
            .on_sync::<lsp_ext::MatchingBrace>(|s, p| {
                handlers::handle_matching_brace(s.snapshot(), p)
            })?
            .on_sync::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
            .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
            .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
            .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
            .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
            .on::<lsp_ext::Runnables>(handlers::handle_runnables)
            .on::<lsp_ext::InlayHints>(handlers::handle_inlay_hints)
            .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
            .on::<lsp_ext::ResolveCodeActionRequest>(handlers::handle_resolve_code_action)
            .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
            .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
            .on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
            .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
            .on::<lsp_types::request::WorkspaceSymbol>(handlers::handle_workspace_symbol)
            .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
            .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
            .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
            .on::<lsp_types::request::Completion>(handlers::handle_completion)
            .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
            .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
            .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
            .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
            .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
            .on::<lsp_types::request::Rename>(handlers::handle_rename)
            .on::<lsp_types::request::References>(handlers::handle_references)
            .on::<lsp_types::request::Formatting>(handlers::handle_formatting)
            .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
            .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
            .on::<lsp_types::request::CallHierarchyIncomingCalls>(
                handlers::handle_call_hierarchy_incoming,
            )
            .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
                handlers::handle_call_hierarchy_outgoing,
            )
            .on::<lsp_types::request::SemanticTokensFullRequest>(
                handlers::handle_semantic_tokens_full,
            )
            .on::<lsp_types::request::SemanticTokensFullDeltaRequest>(
                handlers::handle_semantic_tokens_full_delta,
            )
            .on::<lsp_types::request::SemanticTokensRangeRequest>(
                handlers::handle_semantic_tokens_range,
            )
            .on::<lsp_ext::Ssr>(handlers::handle_ssr)
            .finish();
        Ok(())
    }

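    /// Handles client notifications: text-document lifecycle events, request
    /// cancellation, configuration changes and file-watcher updates.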
    fn on_notification(&mut self, not: Notification) -> Result<()> {
        NotificationDispatcher { not: Some(not), global_state: self }
            .on::<lsp_types::notification::Cancel>(|this, params| {
                let id: lsp_server::RequestId = match params.id {
                    lsp_types::NumberOrString::Number(id) => id.into(),
                    lsp_types::NumberOrString::String(id) => id.into(),
                };
                this.cancel(id);
                Ok(())
            })?
            .on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    if this
                        .mem_docs
                        .insert(path.clone(), DocumentData::new(params.text_document.version))
                        .is_some()
                    {
                        log::error!("duplicate DidOpenTextDocument: {}", path)
                    }
                    this.vfs
                        .write()
                        .0
                        .set_file_contents(path, Some(params.text_document.text.into_bytes()));
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    let doc = this.mem_docs.get_mut(&path).unwrap();
                    let vfs = &mut this.vfs.write().0;
                    let file_id = vfs.file_id(&path).unwrap();
                    let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
                    apply_document_changes(&mut text, params.content_changes);

                    // The version passed in DidChangeTextDocument is the version after all edits
                    // are applied, so we should apply it before the vfs is notified.
                    doc.version = params.text_document.version;

                    vfs.set_file_contents(path.clone(), Some(text.into_bytes()));
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
                let mut version = None;
                if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                    match this.mem_docs.remove(&path) {
                        Some(doc) => version = doc.version,
                        None => log::error!("orphan DidCloseTextDocument: {}", path),
                    }

                    this.semantic_tokens_cache.lock().remove(&params.text_document.uri);

                    if let Some(path) = path.as_path() {
                        this.loader.handle.invalidate(path.to_path_buf());
                    }
                }

                // Clear the diagnostics for the previously known version of the file.
                // This prevents stale "cargo check" diagnostics if the file is
                // closed, "cargo check" is run and then the file is reopened.
                this.send_notification::<lsp_types::notification::PublishDiagnostics>(
                    lsp_types::PublishDiagnosticsParams {
                        uri: params.text_document.uri,
                        diagnostics: Vec::new(),
                        version,
                    },
                );
                Ok(())
            })?
            .on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
                for flycheck in &this.flycheck {
                    flycheck.update();
                }
                if let Ok(abs_path) = from_proto::abs_path(&params.text_document.uri) {
                    this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
                }
                Ok(())
            })?
            .on::<lsp_types::notification::DidChangeConfiguration>(|this, _params| {
                // As stated in https://github.com/microsoft/language-server-protocol/issues/676,
                // this notification's parameters should be ignored and the actual config queried separately.
                this.send_request::<lsp_types::request::WorkspaceConfiguration>(
                    lsp_types::ConfigurationParams {
                        items: vec![lsp_types::ConfigurationItem {
                            scope_uri: None,
                            section: Some("rust-analyzer".to_string()),
                        }],
                    },
                    |this, resp| {
                        log::debug!("config update response: '{:?}'", resp);
                        let Response { error, result, .. } = resp;

                        match (error, result) {
                            (Some(err), _) => {
                                log::error!("failed to fetch the server settings: {:?}", err)
                            }
                            (None, Some(mut configs)) => {
                                if let Some(json) = configs.get_mut(0) {
                                    // Note that json can be null according to the spec if the client can't
                                    // provide a configuration. This is handled in Config::update below.
                                    let mut config = this.config.clone();
                                    config.update(json.take());
                                    this.update_configuration(config);
                                }
                            }
                            (None, None) => log::error!(
                                "received empty server settings response from the client"
                            ),
                        }
                    },
                );

                return Ok(());
            })?
            .on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
                for change in params.changes {
                    if let Ok(path) = from_proto::abs_path(&change.uri) {
                        this.loader.handle.invalidate(path);
                    }
                }
                Ok(())
            })?
            .finish();
        Ok(())
    }

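    /// Kicks off background work for the given files: native diagnostics (when
    /// publishing is enabled) and cache priming, with progress sent back as `Task`s.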
    fn update_file_notifications_on_threadpool(&mut self, subscriptions: Vec<FileId>) {
        log::trace!("updating notifications for {:?}", subscriptions);
        if self.config.publish_diagnostics {
            let snapshot = self.snapshot();
            let subscriptions = subscriptions.clone();
            self.task_pool.handle.spawn(move || {
                let diagnostics = subscriptions
                    .into_iter()
                    .filter_map(|file_id| {
                        handlers::publish_diagnostics(&snapshot, file_id)
                            .map_err(|err| {
                                if !is_canceled(&*err) {
                                    log::error!("failed to compute diagnostics: {:?}", err);
                                }
                                ()
                            })
                            .ok()
                            .map(|diags| (file_id, diags))
                    })
                    .collect::<Vec<_>>();
                Task::Diagnostics(diagnostics)
            })
        }
        self.task_pool.handle.spawn_with_sender({
            let snap = self.snapshot();
            move |sender| {
                snap.analysis
                    .prime_caches(|progress| {
                        sender.send(Task::PrimeCaches(progress)).unwrap();
                    })
                    .unwrap_or_else(|_: Canceled| {
                        // Pretend that we're done, so that the progress bar is removed. Otherwise
                        // the editor may complain about it already existing.
                        sender.send(Task::PrimeCaches(PrimeCachesProgress::Finished)).unwrap()
                    });
            }
        });
    }
}