Skip to content

Commit f9c9512

Browse files
authored
Rollup merge of rust-lang#58309 - wesleywiser:add_more_profiler_events, r=michaelwoerister
Add more profiler events - Adds Start/Stop events for time spent loading incremental query results from disk. - Adds Start/Stop events for time spent blocked waiting for queries to complete (when parallel queries are enabled). r? @michaelwoerister
2 parents c67d474 + e9ebc2e commit f9c9512

File tree

2 files changed

+72
-20
lines changed

2 files changed

+72
-20
lines changed

src/librustc/ty/query/plumbing.rs

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,15 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
124124
let job = match lock.active.entry((*key).clone()) {
125125
Entry::Occupied(entry) => {
126126
match *entry.get() {
127-
QueryResult::Started(ref job) => job.clone(),
127+
QueryResult::Started(ref job) => {
128+
//For parallel queries, we'll block and wait until the query running
129+
//in another thread has completed. Record how long we wait in the
130+
//self-profiler
131+
#[cfg(parallel_compiler)]
132+
tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY));
133+
134+
job.clone()
135+
},
128136
QueryResult::Poisoned => FatalError.raise(),
129137
}
130138
}
@@ -160,7 +168,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
160168
// thread
161169
#[cfg(parallel_compiler)]
162170
{
163-
if let Err(cycle) = job.r#await(tcx, span) {
171+
let result = job.r#await(tcx, span);
172+
tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));
173+
174+
if let Err(cycle) = result {
164175
return TryGetJob::JobCompleted(Err(cycle));
165176
}
166177
}
@@ -441,7 +452,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
441452
// First we try to load the result from the on-disk cache
442453
let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) &&
443454
self.sess.opts.debugging_opts.incremental_queries {
455+
self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME));
444456
let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index);
457+
self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME));
445458

446459
// We always expect to find a cached result for things that
447460
// can be forced from DepNode.

src/librustc/util/profiling.rs

Lines changed: 57 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -25,16 +25,28 @@ pub enum ProfilerEvent {
2525
GenericActivityEnd { category: ProfileCategory, time: Instant },
2626
QueryCacheHit { query_name: &'static str, category: ProfileCategory },
2727
QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
28+
IncrementalLoadResultStart { query_name: &'static str, time: Instant },
29+
IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
30+
QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant },
31+
QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
2832
}
2933

3034
impl ProfilerEvent {
3135
fn is_start_event(&self) -> bool {
3236
use self::ProfilerEvent::*;
3337

3438
match self {
35-
QueryStart { .. } | GenericActivityStart { .. } => true,
36-
QueryEnd { .. } | GenericActivityEnd { .. } |
37-
QueryCacheHit { .. } | QueryCount { .. } => false,
39+
QueryStart { .. } |
40+
GenericActivityStart { .. } |
41+
IncrementalLoadResultStart { .. } |
42+
QueryBlockedStart { .. } => true,
43+
44+
QueryEnd { .. } |
45+
GenericActivityEnd { .. } |
46+
QueryCacheHit { .. } |
47+
QueryCount { .. } |
48+
IncrementalLoadResultEnd { .. } |
49+
QueryBlockedEnd { .. } => false,
3850
}
3951
}
4052
}
@@ -57,12 +69,7 @@ impl CategoryResultData {
5769
}
5870

5971
fn total_time(&self) -> u64 {
60-
let mut total = 0;
61-
for (_, time) in &self.query_times {
62-
total += time;
63-
}
64-
65-
total
72+
self.query_times.iter().map(|(_, time)| time).sum()
6673
}
6774

6875
fn total_cache_data(&self) -> (u64, u64) {
@@ -121,13 +128,7 @@ impl CalculatedResults {
121128
}
122129

123130
fn total_time(&self) -> u64 {
124-
let mut total = 0;
125-
126-
for (_, data) in &self.categories {
127-
total += data.total_time();
128-
}
129-
130-
total
131+
self.categories.iter().map(|(_, data)| data.total_time()).sum()
131132
}
132133

133134
fn with_options(mut self, opts: &Options) -> CalculatedResults {
@@ -225,6 +226,40 @@ impl SelfProfiler {
225226
})
226227
}
227228

229+
#[inline]
230+
pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
231+
self.record(ProfilerEvent::IncrementalLoadResultStart {
232+
query_name,
233+
time: Instant::now(),
234+
})
235+
}
236+
237+
#[inline]
238+
pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
239+
self.record(ProfilerEvent::IncrementalLoadResultEnd {
240+
query_name,
241+
time: Instant::now(),
242+
})
243+
}
244+
245+
#[inline]
246+
pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
247+
self.record(ProfilerEvent::QueryBlockedStart {
248+
query_name,
249+
category,
250+
time: Instant::now(),
251+
})
252+
}
253+
254+
#[inline]
255+
pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
256+
self.record(ProfilerEvent::QueryBlockedEnd {
257+
query_name,
258+
category,
259+
time: Instant::now(),
260+
})
261+
}
262+
228263
#[inline]
229264
fn record(&mut self, event: ProfilerEvent) {
230265
let thread_id = std::thread::current().id();
@@ -317,6 +352,10 @@ impl SelfProfiler {
317352
result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
318353
*totals += *count as u64;
319354
},
355+
//we don't summarize incremental load result events in the simple output mode
356+
IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { },
357+
//we don't summarize parallel query blocking in the simple output mode
358+
QueryBlockedStart { .. } | QueryBlockedEnd { .. } => { },
320359
}
321360
}
322361

@@ -361,9 +400,9 @@ impl SelfProfiler {
361400
.unwrap();
362401

363402
let mut categories: Vec<_> = results.categories.iter().collect();
364-
categories.sort_by(|(_, data1), (_, data2)| data2.total_time().cmp(&data1.total_time()));
403+
categories.sort_by_cached_key(|(_, d)| d.total_time());
365404

366-
for (category, data) in categories {
405+
for (category, data) in categories.iter().rev() {
367406
let (category_hits, category_total) = data.total_cache_data();
368407
let category_hit_percent = calculate_percent(category_hits, category_total);
369408

0 commit comments

Comments
 (0)