diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
index 3df4dfb74b3..0d9100f63b1 100644
--- a/compiler/rustc_span/src/hygiene.rs
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -644,7 +644,10 @@ pub fn debug_hygiene_data(verbose: bool) -> String {
                 let expn_data = expn_data.as_ref().expect("no expansion data for an expansion ID");
                 debug_expn_data((&id.to_expn_id(), expn_data))
             });
+            // Sort the hash map for more reproducible output.
+            // Because of this, it is fine to rely on the unstable iteration order of the map.
+            #[allow(rustc::potential_query_instability)]
             let mut foreign_expn_data: Vec<_> = data.foreign_expn_data.iter().collect();
             foreign_expn_data.sort_by_key(|(id, _)| (id.krate, id.local_id));
             foreign_expn_data.into_iter().for_each(debug_expn_data);
@@ -1210,6 +1213,7 @@ pub fn encode(
             // It's fine to iterate over a HashMap, because the serialization
             // of the table that we insert data into doesn't depend on insertion
             // order
+            #[allow(rustc::potential_query_instability)]
             for_all_ctxts_in(latest_ctxts.into_iter(), |index, ctxt, data| {
                 if self.serialized_ctxts.lock().insert(ctxt) {
                     encode_ctxt(encoder, index, data);
@@ -1218,6 +1222,8 @@ pub fn encode(
 
             let latest_expns = { std::mem::take(&mut *self.latest_expns.lock()) };
 
+            // Same as above, this is fine as we are inserting into an order-independent hashset
+            #[allow(rustc::potential_query_instability)]
             for_all_expns_in(latest_expns.into_iter(), |expn, data, hash| {
                 if self.serialized_expns.lock().insert(expn) {
                     encode_expn(encoder, expn, data, hash);
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index a1f34287a5f..a8074b5b1ba 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -20,7 +20,6 @@
 #![feature(negative_impls)]
 #![feature(min_specialization)]
 #![feature(rustc_attrs)]
-#![allow(rustc::potential_query_instability)]
 
 #[macro_use]
 extern crate rustc_macros;