Auto merge of #14901 - Veykril:errors, r=Veykril
internal: Move flycheck and config errors to status notification

cc https://github.com/rust-lang/rust-analyzer/issues/14193
Commit eee6872647
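For orientation, a minimal, self-contained sketch of the idea behind this change (illustrative only: `State`, `ServerStatus`, and `Health` are simplified stand-ins for the real `GlobalState`, `lsp_ext::ServerStatusParams`, and `lsp_ext::Health`). Rather than surfacing configuration and flycheck failures through one-off `showMessage` popups, the latest errors are kept on the server state (`config_errors`, `last_flycheck_error`) and folded into the message of the server status report, which stays at `Warning` health until the errors go away:

```rust
// Sketch of the approach, not the actual rust-analyzer code.

#[derive(Debug)]
enum Health {
    Ok,
    Warning,
}

#[derive(Debug)]
struct ServerStatus {
    health: Health,
    message: Option<String>,
}

#[derive(Default)]
struct State {
    /// Rendered `ConfigError` from the last config update, if any.
    config_errors: Option<String>,
    /// Last `cargo check` (flycheck) failure, if any.
    last_flycheck_error: Option<String>,
}

impl State {
    /// Fold the stored errors into one status report instead of popping up
    /// a message per error.
    fn current_status(&self) -> ServerStatus {
        let mut health = Health::Ok;
        let mut message = String::new();
        if let Some(err) = &self.config_errors {
            health = Health::Warning;
            message.push_str(err);
            message.push('\n');
        }
        if let Some(err) = &self.last_flycheck_error {
            health = Health::Warning;
            message.push_str(err);
            message.push('\n');
        }
        ServerStatus { health, message: (!message.is_empty()).then_some(message) }
    }
}

fn main() {
    let mut state = State::default();
    state.last_flycheck_error = Some("cargo check failed to start: ...".to_string());
    // The status report (rather than a popup) now carries the error text.
    println!("{:?}", state.current_status());
}
```

The diff below implements this on `GlobalState` (`config_errors: Option<ConfigError>`, `last_flycheck_error: Option<String>`) and in `current_status`.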
@@ -720,11 +720,11 @@ pub struct ClientCommandsConfig {
 }

 #[derive(Debug)]
-pub struct ConfigUpdateError {
+pub struct ConfigError {
     errors: Vec<(String, serde_json::Error)>,
 }

-impl fmt::Display for ConfigUpdateError {
+impl fmt::Display for ConfigError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let errors = self.errors.iter().format_with("\n", |(key, e), f| {
             f(key)?;
@@ -733,8 +733,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         });
         write!(
             f,
-            "rust-analyzer found {} invalid config value{}:\n{}",
-            self.errors.len(),
+            "invalid config value{}:\n{}",
             if self.errors.len() == 1 { "" } else { "s" },
             errors
         )
@@ -777,7 +776,7 @@ pub fn add_workspaces(&mut self, paths: impl Iterator<Item = AbsPathBuf>) {
         self.workspace_roots.extend(paths);
     }

-    pub fn update(&mut self, mut json: serde_json::Value) -> Result<(), ConfigUpdateError> {
+    pub fn update(&mut self, mut json: serde_json::Value) -> Result<(), ConfigError> {
         tracing::info!("updating config from JSON: {:#}", json);
         if json.is_null() || json.as_object().map_or(false, |it| it.is_empty()) {
             return Ok(());
@@ -824,7 +823,7 @@ pub fn update(&mut self, mut json: serde_json::Value) -> Result<(), ConfigUpdate
         if errors.is_empty() {
             Ok(())
         } else {
-            Err(ConfigUpdateError { errors })
+            Err(ConfigError { errors })
         }
     }

@@ -19,7 +19,7 @@
 use vfs::AnchoredPathBuf;

 use crate::{
-    config::Config,
+    config::{Config, ConfigError},
     diagnostics::{CheckFixes, DiagnosticCollection},
     from_proto,
     line_index::{LineEndings, LineIndex},
@@ -52,24 +52,33 @@ pub(crate) struct Handle<H, C> {
 pub(crate) struct GlobalState {
     sender: Sender<lsp_server::Message>,
     req_queue: ReqQueue,

     pub(crate) task_pool: Handle<TaskPool<Task>, Receiver<Task>>,
-    pub(crate) loader: Handle<Box<dyn vfs::loader::Handle>, Receiver<vfs::loader::Message>>,

     pub(crate) config: Arc<Config>,
+    pub(crate) config_errors: Option<ConfigError>,
     pub(crate) analysis_host: AnalysisHost,
     pub(crate) diagnostics: DiagnosticCollection,
     pub(crate) mem_docs: MemDocs,
-    pub(crate) source_root_config: SourceRootConfig,
     pub(crate) semantic_tokens_cache: Arc<Mutex<FxHashMap<Url, SemanticTokens>>>,

+    // status
     pub(crate) shutdown_requested: bool,
     pub(crate) last_reported_status: Option<lsp_ext::ServerStatusParams>,
+    pub(crate) source_root_config: SourceRootConfig,
+
+    // proc macros
     pub(crate) proc_macro_changed: bool,
     pub(crate) proc_macro_clients: Arc<[anyhow::Result<ProcMacroServer>]>,
+
+    // Flycheck
     pub(crate) flycheck: Arc<[FlycheckHandle]>,
     pub(crate) flycheck_sender: Sender<flycheck::Message>,
     pub(crate) flycheck_receiver: Receiver<flycheck::Message>,
+    pub(crate) last_flycheck_error: Option<String>,
+
+    // VFS
+    pub(crate) loader: Handle<Box<dyn vfs::loader::Handle>, Receiver<vfs::loader::Message>>,
     pub(crate) vfs: Arc<RwLock<(vfs::Vfs, IntMap<FileId, LineEndings>)>>,
     pub(crate) vfs_config_version: u32,
     pub(crate) vfs_progress_config_version: u32,
@@ -102,11 +111,12 @@ pub(crate) struct GlobalState {
     /// the user just adds comments or whitespace to Cargo.toml, we do not want
     /// to invalidate any salsa caches.
     pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
+
+    // op queues
     pub(crate) fetch_workspaces_queue: OpQueue<(), Option<Vec<anyhow::Result<ProjectWorkspace>>>>,
     pub(crate) fetch_build_data_queue:
         OpQueue<(), (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)>,
     pub(crate) fetch_proc_macros_queue: OpQueue<Vec<ProcMacroPaths>, bool>,
-
     pub(crate) prime_caches_queue: OpQueue,
 }

@@ -160,6 +170,7 @@ pub(crate) fn new(sender: Sender<lsp_server::Message>, config: Config) -> Global
             shutdown_requested: false,
             last_reported_status: None,
             source_root_config: SourceRootConfig::default(),
+            config_errors: Default::default(),

             proc_macro_changed: false,
             // FIXME: use `Arc::from_iter` when it becomes available
@@ -169,6 +180,7 @@ pub(crate) fn new(sender: Sender<lsp_server::Message>, config: Config) -> Global
             flycheck: Arc::from(Vec::new()),
             flycheck_sender,
             flycheck_receiver,
+            last_flycheck_error: None,

             vfs: Arc::new(RwLock::new((vfs::Vfs::default(), IntMap::default()))),
             vfs_config_version: 0,
@@ -169,13 +169,7 @@ pub(crate) fn handle_did_change_configuration(
                         // Note that json can be null according to the spec if the client can't
                         // provide a configuration. This is handled in Config::update below.
                         let mut config = Config::clone(&*this.config);
-                        if let Err(error) = config.update(json.take()) {
-                            this.show_message(
-                                lsp_types::MessageType::WARNING,
-                                error.to_string(),
-                                false,
-                            );
-                        }
+                        this.config_errors = config.update(json.take()).err();
                         this.update_configuration(config);
                     }
                 }
@@ -77,7 +77,7 @@ pub(crate) enum PrimeCachesProgress {

 impl fmt::Debug for Event {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter<'_>| {
+        let debug_non_verbose = |not: &Notification, f: &mut fmt::Formatter<'_>| {
             f.debug_struct("Notification").field("method", &not.method).finish()
         };

@@ -86,7 +86,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                 if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
                     || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
                 {
-                    return debug_verbose_not(not, f);
+                    return debug_non_verbose(not, f);
                 }
             }
             Event::Task(Task::Response(resp)) => {
@@ -112,38 +112,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
         self.update_status_or_notify();

         if self.config.did_save_text_document_dynamic_registration() {
-            let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
-                include_text: Some(false),
-                text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
-                    document_selector: Some(vec![
-                        lsp_types::DocumentFilter {
-                            language: None,
-                            scheme: None,
-                            pattern: Some("**/*.rs".into()),
-                        },
-                        lsp_types::DocumentFilter {
-                            language: None,
-                            scheme: None,
-                            pattern: Some("**/Cargo.toml".into()),
-                        },
-                        lsp_types::DocumentFilter {
-                            language: None,
-                            scheme: None,
-                            pattern: Some("**/Cargo.lock".into()),
-                        },
-                    ]),
-                },
-            };
-
-            let registration = lsp_types::Registration {
-                id: "textDocument/didSave".to_string(),
-                method: "textDocument/didSave".to_string(),
-                register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
-            };
-            self.send_request::<lsp_types::request::RegisterCapability>(
-                lsp_types::RegistrationParams { registrations: vec![registration] },
-                |_, _| (),
-            );
+            self.register_did_save_capability();
         }

         self.fetch_workspaces_queue.request_op("startup".to_string(), ());
@@ -152,17 +121,54 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
         }

         while let Some(event) = self.next_event(&inbox) {
-            if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
-                if not.method == lsp_types::notification::Exit::METHOD {
-                    return Ok(());
-                }
+            if matches!(
+                &event,
+                Event::Lsp(lsp_server::Message::Notification(Notification { method, .. }))
+                    if method == lsp_types::notification::Exit::METHOD
+            ) {
+                return Ok(());
             }
-            self.handle_event(event)?
+            self.handle_event(event)?;
         }

         Err("client exited without proper shutdown sequence".into())
     }

+    fn register_did_save_capability(&mut self) {
+        let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
+            include_text: Some(false),
+            text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
+                document_selector: Some(vec![
+                    lsp_types::DocumentFilter {
+                        language: None,
+                        scheme: None,
+                        pattern: Some("**/*.rs".into()),
+                    },
+                    lsp_types::DocumentFilter {
+                        language: None,
+                        scheme: None,
+                        pattern: Some("**/Cargo.toml".into()),
+                    },
+                    lsp_types::DocumentFilter {
+                        language: None,
+                        scheme: None,
+                        pattern: Some("**/Cargo.lock".into()),
+                    },
+                ]),
+            },
+        };
+
+        let registration = lsp_types::Registration {
+            id: "textDocument/didSave".to_string(),
+            method: "textDocument/didSave".to_string(),
+            register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
+        };
+        self.send_request::<lsp_types::request::RegisterCapability>(
+            lsp_types::RegistrationParams { registrations: vec![registration] },
+            |_, _| (),
+        );
+    }
+
     fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
         select! {
             recv(inbox) -> msg =>
@@ -184,20 +190,20 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         // NOTE: don't count blocking select! call as a loop-turn time
         let _p = profile::span("GlobalState::handle_event");

-        let event_dbg = format!("{event:?}");
-        tracing::debug!("{:?} handle_event({:?})", loop_start, event);
-        let task_queue_len = self.task_pool.handle.len();
-        if task_queue_len > 0 {
-            tracing::info!("task queue len: {}", task_queue_len);
+        let event_dbg_msg = format!("{event:?}");
+        tracing::debug!("{:?} handle_event({})", loop_start, event_dbg_msg);
+        if tracing::enabled!(tracing::Level::INFO) {
+            let task_queue_len = self.task_pool.handle.len();
+            if task_queue_len > 0 {
+                tracing::info!("task queue len: {}", task_queue_len);
+            }
         }

         let was_quiescent = self.is_quiescent();
         match event {
             Event::Lsp(msg) => match msg {
                 lsp_server::Message::Request(req) => self.on_new_request(loop_start, req),
-                lsp_server::Message::Notification(not) => {
-                    self.on_notification(not)?;
-                }
+                lsp_server::Message::Notification(not) => self.on_notification(not)?,
                 lsp_server::Message::Response(resp) => self.complete_request(resp),
             },
             Event::Task(task) => {
@@ -291,7 +297,8 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                 }
             }

-            if !was_quiescent || state_changed {
+            let client_refresh = !was_quiescent || state_changed;
+            if client_refresh {
                 // Refresh semantic tokens if the client supports it.
                 if self.config.semantic_tokens_refresh() {
                     self.semantic_tokens_cache.lock().clear();
@@ -309,9 +316,9 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                 }
             }

-            if (!was_quiescent || state_changed || memdocs_added_or_removed)
-                && self.config.publish_diagnostics()
-            {
+            let update_diagnostics = (!was_quiescent || state_changed || memdocs_added_or_removed)
+                && self.config.publish_diagnostics();
+            if update_diagnostics {
                 self.update_diagnostics()
             }
         }
@@ -371,38 +378,40 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         }

         if let Some((cause, ())) = self.prime_caches_queue.should_start_op() {
-            tracing::debug!(%cause, "will prime caches");
-            let num_worker_threads = self.config.prime_caches_num_threads();
-
-            self.task_pool.handle.spawn_with_sender({
-                let analysis = self.snapshot().analysis;
-                move |sender| {
-                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
-                    let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
-                        let report = PrimeCachesProgress::Report(progress);
-                        sender.send(Task::PrimeCaches(report)).unwrap();
-                    });
-                    sender
-                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
-                            cancelled: res.is_err(),
-                        }))
-                        .unwrap();
-                }
-            });
+            self.prime_caches(cause);
         }

         self.update_status_or_notify();

         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) && was_quiescent {
-            tracing::warn!("overly long loop turn took {loop_duration:?}: {event_dbg}");
+            tracing::warn!("overly long loop turn took {loop_duration:?}: {event_dbg_msg}");
             self.poke_rust_analyzer_developer(format!(
-                "overly long loop turn took {loop_duration:?}: {event_dbg}"
+                "overly long loop turn took {loop_duration:?}: {event_dbg_msg}"
             ));
         }
         Ok(())
     }

+    fn prime_caches(&mut self, cause: String) {
+        tracing::debug!(%cause, "will prime caches");
+        let num_worker_threads = self.config.prime_caches_num_threads();
+
+        self.task_pool.handle.spawn_with_sender({
+            let analysis = self.snapshot().analysis;
+            move |sender| {
+                sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
+                    let report = PrimeCachesProgress::Report(progress);
+                    sender.send(Task::PrimeCaches(report)).unwrap();
+                });
+                sender
+                    .send(Task::PrimeCaches(PrimeCachesProgress::End { cancelled: res.is_err() }))
+                    .unwrap();
+            }
+        });
+    }
+
     fn update_status_or_notify(&mut self) {
         let status = self.current_status();
         if self.last_reported_status.as_ref() != Some(&status) {
@@ -410,7 +419,11 @@ fn update_status_or_notify(&mut self) {

             if self.config.server_status_notification() {
                 self.send_notification::<lsp_ext::ServerStatusNotification>(status);
-            } else if let (health, Some(message)) = (status.health, &status.message) {
+            } else if let (
+                health @ (lsp_ext::Health::Warning | lsp_ext::Health::Error),
+                Some(message),
+            ) = (status.health, &status.message)
+            {
                 let open_log_button = tracing::enabled!(tracing::Level::ERROR)
                     && (self.fetch_build_data_error().is_err()
                         || self.fetch_workspace_error().is_err());
@@ -589,21 +602,18 @@ fn handle_flycheck_msg(&mut self, message: flycheck::Message) {
                         (Progress::Begin, None)
                     }
                     flycheck::Progress::DidCheckCrate(target) => (Progress::Report, Some(target)),
-                    flycheck::Progress::DidCancel => (Progress::End, None),
+                    flycheck::Progress::DidCancel => {
+                        self.last_flycheck_error = None;
+                        (Progress::End, None)
+                    }
                     flycheck::Progress::DidFailToRestart(err) => {
-                        self.show_and_log_error(
-                            "cargo check failed to start".to_string(),
-                            Some(err),
-                        );
+                        self.last_flycheck_error =
+                            Some(format!("cargo check failed to start: {err}"));
                         return;
                     }
                     flycheck::Progress::DidFinish(result) => {
-                        if let Err(err) = result {
-                            self.show_and_log_error(
-                                "cargo check failed".to_string(),
-                                Some(err.to_string()),
-                            );
-                        }
+                        self.last_flycheck_error =
+                            result.err().map(|err| format!("cargo check failed to start: {err}"));
                         (Progress::End, None)
                     }
                 };
@@ -27,6 +27,7 @@
 use itertools::Itertools;
 use proc_macro_api::{MacroDylib, ProcMacroServer};
 use project_model::{PackageRoot, ProjectWorkspace, WorkspaceBuildScripts};
+use stdx::format_to;
 use syntax::SmolStr;
 use triomphe::Arc;
 use vfs::{file_set::FileSetConfig, AbsPath, AbsPathBuf, ChangeKind};
@@ -134,6 +135,15 @@ pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
             message.push_str("Failed to discover workspace.\n");
             message.push_str("Consider adding the `Cargo.toml` of the workspace to the [`linkedProjects`](https://rust-analyzer.github.io/manual.html#rust-analyzer.linkedProjects) setting.\n\n");
         }
+        if let Some(err) = &self.config_errors {
+            status.health = lsp_ext::Health::Warning;
+            format_to!(message, "{err}\n");
+        }
+        if let Some(err) = &self.last_flycheck_error {
+            status.health = lsp_ext::Health::Warning;
+            message.push_str(err);
+            message.push('\n');
+        }

         for ws in self.workspaces.iter() {
             let (ProjectWorkspace::Cargo { sysroot, .. }