internal: Give rustfmt jobs a separate thread

Lukas Wirth 2023-06-11 19:56:24 +02:00
parent b7497fcdfa
commit 52bb94d697
4 changed files with 54 additions and 29 deletions


@@ -135,7 +135,7 @@ pub(crate) fn on<R>(
R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
R::Result: Serialize,
{
self.on_with_thread_intent::<R>(ThreadIntent::Worker, f)
self.on_with_thread_intent::<true, R>(ThreadIntent::Worker, f)
}
/// Dispatches a latency-sensitive request onto the thread pool.
@@ -148,7 +148,22 @@ pub(crate) fn on_latency_sensitive<R>(
R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
R::Result: Serialize,
{
self.on_with_thread_intent::<R>(ThreadIntent::LatencySensitive, f)
self.on_with_thread_intent::<true, R>(ThreadIntent::LatencySensitive, f)
}
/// Formatting requests should never block waiting for a task thread to open up; editors will wait
/// on the response, and a late formatting update might mess with the document and the user.
/// We can't run this on the main thread, though, as we invoke rustfmt, which may take an arbitrarily long time to complete!
pub(crate) fn on_fmt_thread<R>(
&mut self,
f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
) -> &mut Self
where
R: lsp_types::request::Request + 'static,
R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
R::Result: Serialize,
{
self.on_with_thread_intent::<false, R>(ThreadIntent::LatencySensitive, f)
}
pub(crate) fn finish(&mut self) {
@@ -163,7 +178,7 @@ pub(crate) fn finish(&mut self) {
}
}
fn on_with_thread_intent<R>(
fn on_with_thread_intent<const MAIN_POOL: bool, R>(
&mut self,
intent: ThreadIntent,
f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
@@ -178,17 +193,20 @@ fn on_with_thread_intent<R>(
None => return self,
};
self.global_state.task_pool.handle.spawn(intent, {
let world = self.global_state.snapshot();
move || {
let result = panic::catch_unwind(move || {
let _pctx = stdx::panic_context::enter(panic_context);
f(world, params)
});
match thread_result_to_response::<R>(req.id.clone(), result) {
Ok(response) => Task::Response(response),
Err(_) => Task::Retry(req),
}
let world = self.global_state.snapshot();
if MAIN_POOL {
&mut self.global_state.task_pool.handle
} else {
&mut self.global_state.fmt_pool.handle
}
.spawn(intent, move || {
let result = panic::catch_unwind(move || {
let _pctx = stdx::panic_context::enter(panic_context);
f(world, params)
});
match thread_result_to_response::<R>(req.id.clone(), result) {
Ok(response) => Task::Response(response),
Err(_) => Task::Retry(req),
}
});
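For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of what the `const MAIN_POOL: bool` parameter buys: the pool is chosen at compile time per call site, so `on_fmt_thread` statically routes to the dedicated formatting pool while every other dispatch keeps using the main task pool. `Pool` and `Dispatcher` below are illustrative stand-ins, not rust-analyzer's actual `TaskPool`/`Handle` types.

```rust
// Illustrative stand-in for a thread pool handle.
struct Pool {
    name: &'static str,
}

impl Pool {
    fn spawn(&self, job: impl FnOnce() + Send + 'static) {
        // A real pool would hand `job` to a worker thread; here we run it inline.
        println!("spawning on the {} pool", self.name);
        job();
    }
}

struct Dispatcher {
    task_pool: Pool,
    fmt_pool: Pool,
}

impl Dispatcher {
    fn on_with_thread_intent<const MAIN_POOL: bool>(&self, job: impl FnOnce() + Send + 'static) {
        // The branch is resolved per instantiation: `on_fmt_thread` compiles down to
        // a direct use of `fmt_pool`, everything else keeps using `task_pool`.
        let pool = if MAIN_POOL { &self.task_pool } else { &self.fmt_pool };
        pool.spawn(job);
    }

    fn on(&self, job: impl FnOnce() + Send + 'static) {
        self.on_with_thread_intent::<true>(job);
    }

    fn on_fmt_thread(&self, job: impl FnOnce() + Send + 'static) {
        self.on_with_thread_intent::<false>(job);
    }
}

fn main() {
    let dispatcher = Dispatcher {
        task_pool: Pool { name: "main" },
        fmt_pool: Pool { name: "rustfmt" },
    };
    dispatcher.on(|| println!("semantic request"));
    dispatcher.on_fmt_thread(|| println!("formatting request"));
}
```

Keeping the selection in a const generic rather than a runtime argument leaves the public `on`/`on_fmt_thread` signatures untouched and lets the compiler drop the unused branch in each instantiation.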


@@ -54,6 +54,7 @@ pub(crate) struct GlobalState {
req_queue: ReqQueue,
pub(crate) task_pool: Handle<TaskPool<Task>, Receiver<Task>>,
pub(crate) fmt_pool: Handle<TaskPool<Task>, Receiver<Task>>,
pub(crate) config: Arc<Config>,
pub(crate) config_errors: Option<ConfigError>,
@@ -151,6 +152,11 @@ pub(crate) fn new(sender: Sender<lsp_server::Message>, config: Config) -> Global
let handle = TaskPool::new_with_threads(sender, config.main_loop_num_threads());
Handle { handle, receiver }
};
let fmt_pool = {
let (sender, receiver) = unbounded();
let handle = TaskPool::new_with_threads(sender, 1);
Handle { handle, receiver }
};
let mut analysis_host = AnalysisHost::new(config.lru_parse_query_capacity());
if let Some(capacities) = config.lru_query_capacities() {
@@ -161,6 +167,7 @@ pub(crate) fn new(sender: Sender<lsp_server::Message>, config: Config) -> Global
sender,
req_queue: ReqQueue::default(),
task_pool,
fmt_pool,
loader,
config: Arc::new(config.clone()),
analysis_host,
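A rough sketch of how a dedicated one-thread pool wired to a crossbeam channel can look, assuming `TaskPool::new_with_threads(sender, n)` roughly means "spawn `n` worker threads that send finished tasks back over `sender`". `MiniPool` is illustrative, not the real `TaskPool`; the point is that passing `1` gives rustfmt jobs their own thread, so they queue behind each other but never behind other work.

```rust
use std::thread;

use crossbeam_channel::{unbounded, Receiver, Sender};

// Illustrative single-purpose pool: jobs go in, results come back on `result_sender`.
struct MiniPool<T> {
    job_sender: Sender<Box<dyn FnOnce() -> T + Send>>,
}

impl<T: Send + 'static> MiniPool<T> {
    /// Spawn `threads` workers that run jobs and push results to `result_sender`.
    fn new_with_threads(result_sender: Sender<T>, threads: usize) -> Self {
        let (job_sender, job_receiver) = unbounded::<Box<dyn FnOnce() -> T + Send>>();
        for _ in 0..threads {
            let jobs = job_receiver.clone();
            let results = result_sender.clone();
            thread::spawn(move || {
                // Each worker pulls jobs until the sender side is dropped.
                for job in jobs {
                    let _ = results.send(job());
                }
            });
        }
        MiniPool { job_sender }
    }

    fn spawn(&self, job: impl FnOnce() -> T + Send + 'static) {
        let _ = self.job_sender.send(Box::new(job));
    }
}

fn main() {
    // One dedicated thread, mirroring `TaskPool::new_with_threads(sender, 1)`.
    let (sender, receiver): (Sender<String>, Receiver<String>) = unbounded();
    let fmt_pool = MiniPool::new_with_threads(sender, 1);
    fmt_pool.spawn(|| "formatted document".to_string());
    println!("{}", receiver.recv().unwrap());
}
```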


@@ -18,12 +18,11 @@
use lsp_types::{
CallHierarchyIncomingCall, CallHierarchyIncomingCallsParams, CallHierarchyItem,
CallHierarchyOutgoingCall, CallHierarchyOutgoingCallsParams, CallHierarchyPrepareParams,
CodeLens, CompletionItem, DocumentFormattingParams, FoldingRange, FoldingRangeParams,
HoverContents, InlayHint, InlayHintParams, Location, LocationLink, Position,
PrepareRenameResponse, Range, RenameParams, SemanticTokensDeltaParams,
SemanticTokensFullDeltaResult, SemanticTokensParams, SemanticTokensRangeParams,
SemanticTokensRangeResult, SemanticTokensResult, SymbolInformation, SymbolTag,
TextDocumentIdentifier, Url, WorkspaceEdit,
CodeLens, CompletionItem, FoldingRange, FoldingRangeParams, HoverContents, InlayHint,
InlayHintParams, Location, LocationLink, Position, PrepareRenameResponse, Range, RenameParams,
SemanticTokensDeltaParams, SemanticTokensFullDeltaResult, SemanticTokensParams,
SemanticTokensRangeParams, SemanticTokensRangeResult, SemanticTokensResult, SymbolInformation,
SymbolTag, TextDocumentIdentifier, Url, WorkspaceEdit,
};
use project_model::{ManifestPath, ProjectWorkspace, TargetKind};
use serde_json::json;
@@ -1077,7 +1076,7 @@ pub(crate) fn handle_references(
pub(crate) fn handle_formatting(
snap: GlobalStateSnapshot,
params: DocumentFormattingParams,
params: lsp_types::DocumentFormattingParams,
) -> Result<Option<Vec<lsp_types::TextEdit>>> {
let _p = profile::span("handle_formatting");


@@ -175,6 +175,9 @@ fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
msg.ok().map(Event::Lsp),
recv(self.task_pool.receiver) -> task =>
Some(Event::Task(task.unwrap())),
recv(self.fmt_pool.receiver) -> task =>
Some(Event::Task(task.unwrap())),
recv(self.loader.receiver) -> task =>
@@ -678,6 +681,12 @@ fn on_request(&mut self, req: Request) {
.on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)
.on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)
.on_sync::<lsp_ext::OnTypeFormatting>(handlers::handle_on_type_formatting)
// Formatting should be done immediately as the editor might wait on it, but we can't
// put it on the main thread as we do not want the main thread to block on rustfmt.
// So we have an extra thread just for formatting requests to make sure they get
// handled as fast as possible.
.on_fmt_thread::<lsp_types::request::Formatting>(handlers::handle_formatting)
.on_fmt_thread::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
// We can't run latency-sensitive request handlers which do semantic
// analysis on the main thread because that would block other
// requests. Instead, we run these request handlers on higher priority
@@ -695,14 +704,6 @@ fn on_request(&mut self, req: Request) {
.on_latency_sensitive::<lsp_types::request::SemanticTokensRangeRequest>(
handlers::handle_semantic_tokens_range,
)
// Formatting is not caused by the user typing,
// but it does qualify as latency-sensitive
// because a delay before formatting is applied
// can be confusing for the user.
.on_latency_sensitive::<lsp_types::request::Formatting>(handlers::handle_formatting)
.on_latency_sensitive::<lsp_types::request::RangeFormatting>(
handlers::handle_range_formatting,
)
// All other request handlers
.on::<lsp_ext::FetchDependencyList>(handlers::fetch_dependency_list)
.on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
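Finally, a small sketch of the event-loop side of the change, assuming crossbeam-channel's `select!` as used in `next_event` above: the loop now also polls the formatting pool's receiver, so finished rustfmt jobs flow into the same `Event::Task` handling as results from the main task pool. The types here are simplified stand-ins for rust-analyzer's `Task` and `Event`.

```rust
use crossbeam_channel::{select, unbounded, Receiver};

// Simplified stand-in for the main loop's event type.
enum Event {
    Task(String),
}

fn next_event(task_pool: &Receiver<String>, fmt_pool: &Receiver<String>) -> Option<Event> {
    // Whichever receiver has a finished task ready is handled; neither starves the other.
    select! {
        recv(task_pool) -> task => task.ok().map(Event::Task),
        recv(fmt_pool) -> task => task.ok().map(Event::Task),
    }
}

fn main() {
    let (task_tx, task_rx) = unbounded();
    let (fmt_tx, fmt_rx) = unbounded();
    task_tx.send("highlighting done".to_string()).unwrap();
    fmt_tx.send("formatting done".to_string()).unwrap();
    for _ in 0..2 {
        if let Some(Event::Task(msg)) = next_event(&task_rx, &fmt_rx) {
            println!("{msg}");
        }
    }
}
```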