Rollup merge of #58309 - wesleywiser:add_more_profiler_events, r=michaelwoerister
Add more profiler events

- Adds Start/Stop events for time spent loading incremental query results from disk.
- Adds Start/Stop events for time spent blocked waiting for queries to complete (when parallel queries are enabled).

r? @michaelwoerister
commit f9c9512628
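Both kinds of instrumentation follow the profiler's existing pattern of paired, timestamped events: a Start marker and a matching End marker, with the duration recovered later by pairing them up. As a rough illustration of that event model only, here is a minimal sketch using simplified stand-in types and a hypothetical blocked_time helper, not rustc's actual ProfilerEvent or its result-calculation code:

use std::collections::HashMap;
use std::time::{Duration, Instant};

// Simplified stand-ins for two of the new event variants.
enum Event {
    QueryBlockedStart { query_name: &'static str, time: Instant },
    QueryBlockedEnd { query_name: &'static str, time: Instant },
}

// Total blocked time per query: match each End event against the most
// recent unmatched Start with the same name (a stack tolerates nesting).
fn blocked_time(events: &[Event]) -> HashMap<&'static str, Duration> {
    let mut open: Vec<(&'static str, Instant)> = Vec::new();
    let mut totals: HashMap<&'static str, Duration> = HashMap::new();
    for event in events {
        match *event {
            Event::QueryBlockedStart { query_name, time } => open.push((query_name, time)),
            Event::QueryBlockedEnd { query_name, time } => {
                if let Some(pos) = open.iter().rposition(|&(name, _)| name == query_name) {
                    let (_, start) = open.remove(pos);
                    *totals.entry(query_name).or_insert(Duration::from_secs(0)) += time - start;
                }
            }
        }
    }
    totals
}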
src/librustc/ty/query/plumbing.rs

@@ -124,7 +124,15 @@ pub(super) fn try_get(
         let job = match lock.active.entry((*key).clone()) {
             Entry::Occupied(entry) => {
                 match *entry.get() {
-                    QueryResult::Started(ref job) => job.clone(),
+                    QueryResult::Started(ref job) => {
+                        // For parallel queries, we'll block and wait until the query running
+                        // in another thread has completed. Record how long we wait in the
+                        // self-profiler
+                        #[cfg(parallel_compiler)]
+                        tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY));
+
+                        job.clone()
+                    },
                     QueryResult::Poisoned => FatalError.raise(),
                 }
             }
@@ -160,7 +168,10 @@ pub(super) fn try_get(
             // thread
             #[cfg(parallel_compiler)]
             {
-                if let Err(cycle) = job.r#await(tcx, span) {
+                let result = job.r#await(tcx, span);
+                tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));
+
+                if let Err(cycle) = result {
                     return TryGetJob::JobCompleted(Err(cycle));
                 }
             }
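Two details worth noting in this hunk: the await result is bound to a local first so that query_blocked_end fires on both the success path and the cycle-error path before the early return, and r#await is a raw identifier because await is reserved as a keyword in the 2018 edition.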
@@ -441,7 +452,9 @@ fn load_from_disk_and_cache_in_memory<Q: QueryDescription<'gcx>>(
         // First we try to load the result from the on-disk cache
         let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
                         self.sess.opts.debugging_opts.incremental_queries {
+            self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
             let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
+            self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));

             // We always expect to find a cached result for things that
             // can be forced from DepNode.
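The bracketing here is deliberately narrow: only the Q::try_load_from_disk call sits between the start and end events, so the measured interval covers just the time to decode the cached result from disk, not the dep-graph bookkeeping around it.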
src/librustc/util/profiling.rs

@@ -25,6 +25,10 @@ pub enum ProfilerEvent {
     GenericActivityEnd { category: ProfileCategory, time: Instant },
     QueryCacheHit { query_name: &'static str, category: ProfileCategory },
     QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
+    IncrementalLoadResultStart { query_name: &'static str, time: Instant },
+    IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
+    QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant },
+    QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
 }

 impl ProfilerEvent {
@@ -32,9 +36,17 @@ fn is_start_event(&self) -> bool {
         use self::ProfilerEvent::*;

         match self {
-            QueryStart { .. } | GenericActivityStart { .. } => true,
-            QueryEnd { .. } | GenericActivityEnd { .. } |
-            QueryCacheHit { .. } | QueryCount { .. } => false,
+            QueryStart { .. } |
+            GenericActivityStart { .. } |
+            IncrementalLoadResultStart { .. } |
+            QueryBlockedStart { .. } => true,
+
+            QueryEnd { .. } |
+            GenericActivityEnd { .. } |
+            QueryCacheHit { .. } |
+            QueryCount { .. } |
+            IncrementalLoadResultEnd { .. } |
+            QueryBlockedEnd { .. } => false,
         }
     }
 }
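Breaking the arms out one variant per line keeps the match exhaustive with no wildcard, so any future ProfilerEvent variant will fail to compile here until it is explicitly classified as a start or non-start event.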
@@ -57,12 +69,7 @@ fn new() -> CategoryResultData {
     }

     fn total_time(&self) -> u64 {
-        let mut total = 0;
-        for (_, time) in &self.query_times {
-            total += time;
-        }
-
-        total
+        self.query_times.iter().map(|(_, time)| time).sum()
     }

     fn total_cache_data(&self) -> (u64, u64) {
@@ -121,13 +128,7 @@ fn consolidate(mut cr1: CalculatedResults, cr2: CalculatedResults) -> CalculatedResults {
     }

     fn total_time(&self) -> u64 {
-        let mut total = 0;
-
-        for (_, data) in &self.categories {
-            total += data.total_time();
-        }
-
-        total
+        self.categories.iter().map(|(_, data)| data.total_time()).sum()
     }

     fn with_options(mut self, opts: &Options) -> CalculatedResults {
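Both total_time rewrites in this file are pure refactorings: the manual accumulator loop becomes a single iterator chain, and summing the same u64 values with map(...).sum() produces the same total as the old total += ... loop.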
@@ -225,6 +226,40 @@ pub fn end_query(&mut self, query_name: &'static str, category: ProfileCategory)
         })
     }

+    #[inline]
+    pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
+        self.record(ProfilerEvent::IncrementalLoadResultStart {
+            query_name,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
+        self.record(ProfilerEvent::IncrementalLoadResultEnd {
+            query_name,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
+        self.record(ProfilerEvent::QueryBlockedStart {
+            query_name,
+            category,
+            time: Instant::now(),
+        })
+    }
+
+    #[inline]
+    pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
+        self.record(ProfilerEvent::QueryBlockedEnd {
+            query_name,
+            category,
+            time: Instant::now(),
+        })
+    }
+
     #[inline]
     fn record(&mut self, event: ProfilerEvent) {
         let thread_id = std::thread::current().id();
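The four new hooks mirror the existing start/end pairs: each stamps Instant::now() at the call site and hands a ProfilerEvent to record, which files it under the current thread's ID. Since callers go through tcx.sess.profiler(|p| ...), as in the hunks above, no timestamp is taken and no event is built unless the session actually runs the closure.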
@@ -317,6 +352,10 @@ fn calculate_thread_results(events: &Vec<ProfilerEvent>) -> CalculatedResults {
                     result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
                 *totals += *count as u64;
             },
+            // We don't summarize incremental load result events in the simple output mode.
+            IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { },
+            // We don't summarize parallel query blocking in the simple output mode.
+            QueryBlockedStart { .. } | QueryBlockedEnd { .. } => { },
         }
     }

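The new event kinds get explicit empty arms rather than a catch-all _: a wildcard would also compile, but the explicit arms document that skipping them in the simple output mode is intentional, and they keep this match exhaustive as well.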
@@ -361,9 +400,9 @@ pub fn print_results(&mut self, opts: &Options) {
             .unwrap();

         let mut categories: Vec<_> = results.categories.iter().collect();
-        categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time()));
+        categories.sort_by_cached_key(|(_, d)| d.total_time());

-        for (category, data) in categories {
+        for (category, data) in categories.iter().rev() {
             let (category_hits, category_total) = data.total_cache_data();
             let category_hit_percent = calculate_percent(category_hits, category_total);

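The sort refactor changes both the strategy and the direction: sort_by_cached_key computes each category's total_time() once instead of on every comparison, sorts ascending, and the loop then walks the vector in reverse to preserve the old largest-first output order. A small self-contained sketch of that equivalence, on hypothetical data (note that for equal totals the two orders can differ, since reversing a stable ascending sort flips the relative order of ties):

fn main() {
    let mut v = vec![("codegen", 30u64), ("typeck", 70), ("parsing", 10)];

    // Old approach: comparator that sorts descending by total time,
    // recomputing the key on every comparison.
    let mut old = v.clone();
    old.sort_by(|(_, t1), (_, t2)| t2.cmp(t1));

    // New approach: cache the key once per element, sort ascending,
    // then iterate in reverse for a largest-first order.
    v.sort_by_cached_key(|&(_, t)| t);
    let new: Vec<_> = v.iter().rev().cloned().collect();

    assert_eq!(old, new);
}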