diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 5ebeae35964..6d76d09f619 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -83,17 +83,12 @@ fn start_query(
         &self,
         token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R {
         // The `TyCtxt` stored in TLS has the same global interner lifetime
         // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
         // when accessing the `ImplicitCtxt`.
         tls::with_related_context(**self, move |current_icx| {
-            let mut old_read_allowed = false;
-            if let Some(task_deps) = current_icx.task_deps {
-                old_read_allowed = std::mem::replace(&mut task_deps.lock().read_allowed, read_allowed);
-            }
             // Update the `ImplicitCtxt` to point to our new query job.
             let new_icx = ImplicitCtxt {
                 tcx: **self,
@@ -104,14 +99,9 @@ fn start_query(
             };
 
             // Use the `ImplicitCtxt` while we execute the query.
-            let res = tls::enter_context(&new_icx, |_| {
+            tls::enter_context(&new_icx, |_| {
                 rustc_data_structures::stack::ensure_sufficient_stack(compute)
-            });
-
-            if let Some(task_deps) = new_icx.task_deps {
-                task_deps.lock().read_allowed = old_read_allowed;
-            }
-            res
+            })
         })
     }
 }
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 265e0b80d7c..a2f7843baaa 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -142,7 +142,6 @@ fn start_query(
         &self,
         token: QueryJobId,
         diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
-        read_allowed: bool,
         compute: impl FnOnce() -> R,
     ) -> R;
 }
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index fd22698e419..cd31c5b3f08 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,6 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
+use crate::dep_graph::DepKind;
 use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams, TaskDeps};
 use crate::query::caches::QueryCache;
 use crate::query::config::{QueryDescription, QueryVtable};
@@ -9,7 +10,6 @@
     report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId,
 };
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
-use crate::dep_graph::DepKind;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::{FxHashMap, FxHasher};
 #[cfg(parallel_compiler)]
@@ -440,7 +440,7 @@ fn execute_job(
     // Fast path for when incr. comp. is off.
     if !dep_graph.is_fully_enabled() {
         let prof_timer = tcx.dep_context().profiler().query_provider();
-        let result = tcx.start_query(job_id, None, true, || query.compute(*tcx.dep_context(), key));
+        let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
         let dep_node_index = dep_graph.next_virtual_depnode_index();
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
         return (result, dep_node_index);
@@ -453,7 +453,7 @@ fn execute_job(
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
-        if let Some(ret) = tcx.start_query(job_id, None, false, || {
+        if let Some(ret) = tcx.start_query(job_id, None, || {
             try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
         }) {
             return ret;
         }
@@ -463,7 +463,7 @@
     let prof_timer = tcx.dep_context().profiler().query_provider();
     let diagnostics = Lock::new(ThinVec::new());
 
-    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), true, || {
+    let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
         if query.anon {
             return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
                 query.compute(*tcx.dep_context(), key)
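
For orientation, the shape that remains after this patch is simply "build a new implicit context, enter it, run the closure", with no read_allowed save/restore bracketing the computation. Below is a minimal, self-contained sketch of that pattern using a plain thread-local. It is not rustc's actual ImplicitCtxt/tls API: Ctx, TLS_CTX, with_current, enter, and the free-standing start_query are stand-in names invented for this illustration, and ensure_sufficient_stack is omitted.

// Toy sketch, not rustc code: post-patch, `start_query` only swaps the
// implicit context for the duration of `compute`.
use std::cell::RefCell;

#[derive(Clone, Copy, Default)]
struct Ctx {
    query: Option<u32>, // stand-in for the current QueryJobId
    depth: usize,       // stand-in for the other fields carried along unchanged
}

thread_local! {
    static TLS_CTX: RefCell<Ctx> = RefCell::new(Ctx::default());
}

// Read the current implicit context (analogue of tls::with_related_context).
fn with_current<R>(f: impl FnOnce(Ctx) -> R) -> R {
    TLS_CTX.with(|c| f(*c.borrow()))
}

// Install `new_ctx` while `f` runs, then restore the old context
// (analogue of tls::enter_context).
fn enter<R>(new_ctx: Ctx, f: impl FnOnce() -> R) -> R {
    TLS_CTX.with(|c| {
        let old = std::mem::replace(&mut *c.borrow_mut(), new_ctx);
        let res = f();
        *c.borrow_mut() = old;
        res
    })
}

// Post-patch shape: no `read_allowed` parameter and no bookkeeping around `compute`.
fn start_query<R>(token: u32, compute: impl FnOnce() -> R) -> R {
    with_current(|current| {
        let new_ctx = Ctx { query: Some(token), ..current };
        enter(new_ctx, compute)
    })
}

fn main() {
    let (job, depth) = start_query(7, || with_current(|c| (c.query, c.depth)));
    assert_eq!(job, Some(7));
    println!("inside the query: id = {job:?}, inherited depth = {depth}");
}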