Use appropriate QoS classes throughout the codebase

Luna Razzaghipour 2023-05-25 20:55:42 +10:00
parent 2924fd2213
commit d0b001eed2
9 changed files with 20 additions and 19 deletions
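
The pattern applied across all nine files is the same: every thread or task-pool job now names a QoS class when it is spawned. Below is a small, self-contained sketch of that pattern. It is only an illustration of the shape of the API, not rust-analyzer's actual stdx::thread module; the enum comments summarize how the classes are used in the hunks that follow.

// A hedged sketch of the pattern this commit applies; this is NOT the real
// stdx::thread module, just an illustration of how a QoS class travels with
// every spawned thread.
use std::thread;

#[derive(Clone, Copy, Debug)]
enum QoSClass {
    Background,      // maintenance work the user never waits on
    Utility,         // long-running work with visible progress (cargo check, cache priming)
    UserInitiated,   // work the user is actively waiting on (the LSP main loop)
    UserInteractive, // work that would block the render loop; a server never needs this
}

struct Builder {
    qos: QoSClass,
    inner: thread::Builder,
}

impl Builder {
    fn new(qos: QoSClass) -> Builder {
        Builder { qos, inner: thread::Builder::new() }
    }

    fn name(mut self, name: String) -> Builder {
        self.inner = self.inner.name(name);
        self
    }

    fn spawn<F, T>(self, f: F) -> std::io::Result<thread::JoinHandle<T>>
    where
        F: FnOnce() -> T + Send + 'static,
        T: Send + 'static,
    {
        let qos = self.qos;
        self.inner.spawn(move || {
            // A real implementation would apply `qos` to the current thread here
            // (on macOS via the pthread QoS API); everywhere else it is a no-op.
            let _ = qos;
            f()
        })
    }
}

fn main() {
    // Long-running background work runs at Utility…
    let flycheck = Builder::new(QoSClass::Utility)
        .name("Flycheck".to_owned())
        .spawn(|| { /* run cargo check and stream diagnostics */ })
        .expect("failed to spawn thread");

    // …while the thread serving editor requests runs at UserInitiated.
    let server = Builder::new(QoSClass::UserInitiated)
        .name("LspServer".to_owned())
        .spawn(|| { /* drive the LSP main loop */ })
        .expect("failed to spawn thread");

    flycheck.join().unwrap();
    server.join().unwrap();
}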

View File

@@ -90,7 +90,7 @@ impl FlycheckHandle {
     ) -> FlycheckHandle {
         let actor = FlycheckActor::new(id, sender, config, workspace_root);
         let (sender, receiver) = unbounded::<StateChange>();
-        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Default)
+        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Utility)
             .name("Flycheck".to_owned())
             .spawn(move || actor.run(receiver))
             .expect("failed to spawn thread");
@@ -409,7 +409,7 @@ impl CargoHandle {
         let (sender, receiver) = unbounded();
         let actor = CargoActor::new(sender, stdout, stderr);
-        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Default)
+        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Utility)
             .name("CargoHandle".to_owned())
             .spawn(move || actor.run())
             .expect("failed to spawn thread");

View File

@@ -81,7 +81,7 @@ pub(crate) fn parallel_prime_caches(
         let worker = prime_caches_worker.clone();
         let db = db.snapshot();
-        stdx::thread::Builder::new(stdx::thread::QoSClass::Default)
+        stdx::thread::Builder::new(stdx::thread::QoSClass::Utility)
             .allow_leak(true)
             .spawn(move || Cancelled::catch(|| worker(db)))
             .expect("failed to spawn thread");

View File

@@ -85,7 +85,7 @@ fn try_main(flags: flags::RustAnalyzer) -> Result<()> {
             // will make actions like hitting enter in the editor slow.
             // rust-analyzer does not block the editor's render loop,
             // so we don't use User Interactive.
-            with_extra_thread("LspServer", stdx::thread::QoSClass::Default, run_server)?;
+            with_extra_thread("LspServer", stdx::thread::QoSClass::UserInitiated, run_server)?;
         }
         flags::RustAnalyzerCmd::Parse(cmd) => cmd.run()?,
         flags::RustAnalyzerCmd::Symbols(cmd) => cmd.run()?,

View File

@@ -88,7 +88,8 @@ impl<'a> RequestDispatcher<'a> {
         self
     }
 
-    /// Dispatches the request onto thread pool
+    /// Dispatches a non-latency-sensitive request onto the thread pool
+    /// without retrying it if it panics.
     pub(crate) fn on_no_retry<R>(
         &mut self,
         f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
@@ -103,7 +104,7 @@ impl<'a> RequestDispatcher<'a> {
             None => return self,
         };
 
-        self.global_state.task_pool.handle.spawn(QoSClass::Default, {
+        self.global_state.task_pool.handle.spawn(QoSClass::Utility, {
             let world = self.global_state.snapshot();
             move || {
                 let result = panic::catch_unwind(move || {
@@ -124,7 +125,7 @@ impl<'a> RequestDispatcher<'a> {
         self
     }
 
-    /// Dispatches the request onto thread pool
+    /// Dispatches a non-latency-sensitive request onto the thread pool.
     pub(crate) fn on<R>(
         &mut self,
         f: fn(GlobalStateSnapshot, R::Params) -> Result<R::Result>,
@@ -134,7 +135,7 @@ impl<'a> RequestDispatcher<'a> {
         R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
         R::Result: Serialize,
     {
-        self.on_with_qos::<R>(QoSClass::Default, f)
+        self.on_with_qos::<R>(QoSClass::Utility, f)
     }
 
     /// Dispatches a latency-sensitive request onto the thread pool.
@@ -147,7 +148,7 @@ impl<'a> RequestDispatcher<'a> {
         R::Params: DeserializeOwned + panic::UnwindSafe + Send + fmt::Debug,
         R::Result: Serialize,
     {
-        self.on_with_qos::<R>(QoSClass::Default, f)
+        self.on_with_qos::<R>(QoSClass::UserInitiated, f)
     }
 
     pub(crate) fn finish(&mut self) {
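
The doc comments above encode a simple policy: ordinary requests are dispatched to the task pool at Utility, while latency-sensitive ones get UserInitiated. A minimal sketch of that split follows; the TaskPool type, the request examples, and the particular mapping are invented for illustration and are not rust-analyzer's actual handler registration.

// Hedged sketch of the latency-sensitive vs. non-latency-sensitive split the
// doc comments describe. TaskPool, the request examples, and the mapping are
// all illustrative assumptions, not rust-analyzer's real dispatch table.
#[derive(Clone, Copy, Debug)]
enum QoSClass {
    Utility,
    UserInitiated,
}

struct TaskPool;

impl TaskPool {
    fn spawn(&self, qos: QoSClass, task: impl FnOnce() + Send + 'static) {
        // A real pool would hand `task` to a worker thread running at `qos`;
        // here we only record the intent and run it inline.
        println!("spawning task at {qos:?}");
        task();
    }
}

fn main() {
    let pool = TaskPool;

    // Something the user is actively waiting on while typing:
    pool.spawn(QoSClass::UserInitiated, || println!("compute completions"));

    // Something that can tolerate a little extra latency:
    pool.spawn(QoSClass::Utility, || println!("resolve workspace symbols"));
}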

View File

@@ -291,7 +291,7 @@ fn run_flycheck(state: &mut GlobalState, vfs_path: VfsPath) -> bool {
             }
             Ok(())
         };
-        state.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Default, move |_| {
+        state.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |_| {
             if let Err(e) = std::panic::catch_unwind(task) {
                 tracing::error!("flycheck task panicked: {e:?}")
             }

View File

@@ -397,7 +397,7 @@ impl GlobalState {
         tracing::debug!(%cause, "will prime caches");
         let num_worker_threads = self.config.prime_caches_num_threads();
 
-        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Default, {
+        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, {
             let analysis = self.snapshot().analysis;
             move |sender| {
                 sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
@@ -787,7 +787,10 @@ impl GlobalState {
         tracing::trace!("updating notifications for {:?}", subscriptions);
 
         let snapshot = self.snapshot();
-        self.task_pool.handle.spawn(stdx::thread::QoSClass::Default, move || {
+
+        // Diagnostics are triggered by the user typing
+        // so we want computing them to run at the User Initiated QoS.
+        self.task_pool.handle.spawn(stdx::thread::QoSClass::UserInitiated, move || {
             let _p = profile::span("publish_diagnostics");
             let diagnostics = subscriptions
                 .into_iter()

View File

@@ -185,7 +185,7 @@ impl GlobalState {
     pub(crate) fn fetch_workspaces(&mut self, cause: Cause) {
         tracing::info!(%cause, "will fetch workspaces");
-        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Default, {
+        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, {
             let linked_projects = self.config.linked_projects();
             let detached_files = self.config.detached_files().to_vec();
             let cargo_config = self.config.cargo();
@@ -260,7 +260,7 @@ impl GlobalState {
         tracing::info!(%cause, "will fetch build data");
         let workspaces = Arc::clone(&self.workspaces);
         let config = self.config.cargo();
-        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Default, move |sender| {
+        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |sender| {
             sender.send(Task::FetchBuildData(BuildDataProgress::Begin)).unwrap();
 
             let progress = {
@@ -280,7 +280,7 @@ impl GlobalState {
         let dummy_replacements = self.config.dummy_replacements().clone();
         let proc_macro_clients = self.proc_macro_clients.clone();
-        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Default, move |sender| {
+        self.task_pool.handle.spawn_with_sender(stdx::thread::QoSClass::Utility, move |sender| {
             sender.send(Task::LoadProcMacros(ProcMacroProgress::Begin)).unwrap();
 
             let dummy_replacements = &dummy_replacements;

View File

@@ -155,8 +155,6 @@ pub enum QoSClass {
     /// performance, responsiveness and efficiency.
     Utility,
 
-    Default,
-
     /// TLDR: tasks that block using your app
     ///
     /// Contract:
@@ -234,7 +232,6 @@ mod imp {
         let c = match class {
             QoSClass::UserInteractive => libc::qos_class_t::QOS_CLASS_USER_INTERACTIVE,
             QoSClass::UserInitiated => libc::qos_class_t::QOS_CLASS_USER_INITIATED,
-            QoSClass::Default => libc::qos_class_t::QOS_CLASS_DEFAULT,
             QoSClass::Utility => libc::qos_class_t::QOS_CLASS_UTILITY,
             QoSClass::Background => libc::qos_class_t::QOS_CLASS_BACKGROUND,
         };
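
For context on the hunk above: on macOS the selected libc::qos_class_t is applied to the thread itself, typically via pthread_set_qos_class_self_np from the libc crate. The helper below is a hedged sketch of that step; the function name, the 0 relative priority, and the assert are assumptions rather than the exact stdx::thread code, and it requires the libc crate on an Apple target.

// Hedged sketch of how the match above is typically consumed: the freshly
// spawned thread sets its own QoS class before running its payload.
// Assumes the `libc` crate on an Apple target; not the exact stdx::thread code.
#[cfg(target_vendor = "apple")]
fn set_current_thread_qos(class: libc::qos_class_t) {
    // A relative priority of 0 means "the default priority within this class".
    let code = unsafe { libc::pthread_set_qos_class_self_np(class, 0) };

    // Failure here indicates a programming error (e.g. an invalid class), so a
    // plain assert is enough for a sketch.
    assert_eq!(code, 0, "failed to set thread QoS class");
}

A spawn wrapper in the style of the ones shown in this commit would call something like this at the top of the new thread, before invoking the caller's closure.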

View File

@@ -34,7 +34,7 @@ impl loader::Handle for NotifyHandle {
     fn spawn(sender: loader::Sender) -> NotifyHandle {
         let actor = NotifyActor::new(sender);
         let (sender, receiver) = unbounded::<Message>();
-        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Default)
+        let thread = stdx::thread::Builder::new(stdx::thread::QoSClass::Utility)
             .name("VfsLoader".to_owned())
             .spawn(move || actor.run(receiver))
             .expect("failed to spawn thread");