Commit f21c084

Move diagnostics out from QueryJob and optimize for the case with no diagnostics

1 parent b8c8f0b commit f21c084

File tree

5 files changed: +67 -46 lines changed

  src/librustc/dep_graph/graph.rs
  src/librustc/ty/context.rs
  src/librustc/ty/query/job.rs
  src/librustc/ty/query/on_disk_cache.rs
  src/librustc/ty/query/plumbing.rs

src/librustc/dep_graph/graph.rs

+1 -1

@@ -696,7 +696,7 @@ impl DepGraph {
 
             // Promote the previous diagnostics to the current session.
             tcx.queries.on_disk_cache
-                .store_diagnostics(dep_node_index, diagnostics.clone());
+                .store_diagnostics(dep_node_index, diagnostics.clone().into());
 
             for diagnostic in diagnostics {
                 DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
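Note on the `.into()` added here: `store_diagnostics` now takes a `ThinVec<Diagnostic>` (see on_disk_cache.rs below), so the cloned `Vec<Diagnostic>` is converted at the call site. A minimal sketch of why a one-word vector helps the common empty case, using an illustrative `ThinVec` stand-in rather than rustc's actual `rustc_data_structures::thin_vec` implementation:

```rust
/// Illustrative stand-in for ThinVec: one word wide, allocation-free while empty.
pub struct ThinVec<T>(Option<Box<Vec<T>>>);

impl<T> ThinVec<T> {
    pub fn is_empty(&self) -> bool {
        self.0.as_ref().map_or(true, |v| v.is_empty())
    }
}

impl<T> From<Vec<T>> for ThinVec<T> {
    fn from(v: Vec<T>) -> Self {
        // Keep the common empty case allocation-free.
        if v.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(v))) }
    }
}

impl<T> From<ThinVec<T>> for Vec<T> {
    fn from(tv: ThinVec<T>) -> Self {
        tv.0.map_or_else(Vec::new, |b| *b)
    }
}

fn main() {
    let diagnostics: Vec<String> = Vec::new();
    // Mirrors `diagnostics.clone().into()` in the hunk above.
    let thin: ThinVec<String> = diagnostics.clone().into();
    assert!(thin.is_empty());
    // The whole point: a ThinVec is a single pointer, a Vec is three words.
    assert!(std::mem::size_of::<ThinVec<String>>() < std::mem::size_of::<Vec<String>>());
}
```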

src/librustc/ty/context.rs

+12 -3

@@ -1673,6 +1673,7 @@ impl<'gcx> GlobalCtxt<'gcx> {
         let new_icx = ty::tls::ImplicitCtxt {
             tcx,
             query: icx.query.clone(),
+            diagnostics: icx.diagnostics,
             layout_depth: icx.layout_depth,
             task_deps: icx.task_deps,
         };
@@ -1782,6 +1783,7 @@ pub mod tls {
     use errors::{Diagnostic, TRACK_DIAGNOSTICS};
     use rustc_data_structures::OnDrop;
     use rustc_data_structures::sync::{self, Lrc, Lock};
+    use rustc_data_structures::thin_vec::ThinVec;
    use dep_graph::TaskDeps;
 
     #[cfg(not(parallel_queries))]
@@ -1801,10 +1803,14 @@ pub mod tls {
         /// by `enter_local` with a new local interner
         pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>,
 
-        /// The current query job, if any. This is updated by start_job in
+        /// The current query job, if any. This is updated by JobOwner::start in
         /// ty::query::plumbing when executing a query
         pub query: Option<Lrc<query::QueryJob<'gcx>>>,
 
+        /// Where to store diagnostics for the current query job, if any.
+        /// This is updated by JobOwner::start in ty::query::plumbing when executing a query
+        pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
+
         /// Used to prevent layout from recursing too deeply.
         pub layout_depth: usize,
 
@@ -1870,8 +1876,9 @@ pub mod tls {
     fn track_diagnostic(diagnostic: &Diagnostic) {
         with_context_opt(|icx| {
             if let Some(icx) = icx {
-                if let Some(ref query) = icx.query {
-                    query.diagnostics.lock().push(diagnostic.clone());
+                if let Some(ref diagnostics) = icx.diagnostics {
+                    let mut diagnostics = diagnostics.lock();
+                    diagnostics.extend(Some(diagnostic.clone()));
                 }
             }
         })
@@ -1938,6 +1945,7 @@ pub mod tls {
             let icx = ImplicitCtxt {
                 tcx,
                 query: None,
+                diagnostics: None,
                 layout_depth: 0,
                 task_deps: None,
             };
@@ -1967,6 +1975,7 @@ pub mod tls {
             };
             let icx = ImplicitCtxt {
                 query: None,
+                diagnostics: None,
                 tcx,
                 layout_depth: 0,
                 task_deps: icx.task_deps,

src/librustc/ty/query/job.rs

-5

@@ -14,7 +14,6 @@ use ty::query::{
     config::QueryDescription,
 };
 use ty::context::TyCtxt;
-use errors::Diagnostic;
 use std::process;
 use std::{fmt, ptr};
 
@@ -54,9 +53,6 @@ pub struct QueryJob<'tcx> {
     /// The parent query job which created this job and is implicitly waiting on it.
     pub parent: Option<Lrc<QueryJob<'tcx>>>,
 
-    /// Diagnostic messages which are emitted while the query executes
-    pub diagnostics: Lock<Vec<Diagnostic>>,
-
     /// The latch which is used to wait on this job
     #[cfg(parallel_queries)]
     latch: QueryLatch<'tcx>,
@@ -66,7 +62,6 @@ impl<'tcx> QueryJob<'tcx> {
     /// Creates a new query job
     pub fn new(info: QueryInfo<'tcx>, parent: Option<Lrc<QueryJob<'tcx>>>) -> Self {
        QueryJob {
-            diagnostics: Lock::new(Vec::new()),
            info,
            parent,
            #[cfg(parallel_queries)]
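With the field gone, `QueryJob::new` no longer constructs a `Lock<Vec<Diagnostic>>` for every executed query, even though most queries emit no diagnostics at all. A rough, simplified sketch of the structural change (illustrative types only, `Mutex` standing in for `Lock`):

```rust
use std::sync::Mutex;

// Before: every job carried a lock-wrapped Vec, almost always left empty.
struct JobBefore {
    diagnostics: Mutex<Vec<String>>,
}

// After: the sink lives on the caller's stack (see `with_diagnostics` in
// plumbing.rs below), reachable only through `ImplicitCtxt::diagnostics`,
// so the job itself carries nothing.
struct JobAfter;

fn main() {
    println!("per-job overhead before: {} bytes", std::mem::size_of::<JobBefore>());
    println!("per-job overhead after:  {} bytes", std::mem::size_of::<JobAfter>());
}
```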

src/librustc/ty/query/on_disk_cache.rs

+10 -7

@@ -7,6 +7,7 @@ use ich::{CachingSourceMapView, Fingerprint};
 use mir::{self, interpret};
 use mir::interpret::{AllocDecodingSession, AllocDecodingState};
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::thin_vec::ThinVec;
 use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once};
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
@@ -341,11 +342,13 @@ impl<'sess> OnDiskCache<'sess> {
     /// Store a diagnostic emitted during the current compilation session.
     /// Anything stored like this will be available via `load_diagnostics` in
     /// the next compilation session.
+    #[inline(never)]
+    #[cold]
     pub fn store_diagnostics(&self,
                              dep_node_index: DepNodeIndex,
-                             diagnostics: Vec<Diagnostic>) {
+                             diagnostics: ThinVec<Diagnostic>) {
         let mut current_diagnostics = self.current_diagnostics.borrow_mut();
-        let prev = current_diagnostics.insert(dep_node_index, diagnostics);
+        let prev = current_diagnostics.insert(dep_node_index, diagnostics.into());
         debug_assert!(prev.is_none());
     }
 
@@ -367,16 +370,16 @@ impl<'sess> OnDiskCache<'sess> {
     /// Since many anonymous queries can share the same `DepNode`, we aggregate
     /// them -- as opposed to regular queries where we assume that there is a
     /// 1:1 relationship between query-key and `DepNode`.
+    #[inline(never)]
+    #[cold]
     pub fn store_diagnostics_for_anon_node(&self,
                                            dep_node_index: DepNodeIndex,
-                                           mut diagnostics: Vec<Diagnostic>) {
+                                           diagnostics: ThinVec<Diagnostic>) {
         let mut current_diagnostics = self.current_diagnostics.borrow_mut();
 
-        let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
-            mem::replace(&mut diagnostics, Vec::new())
-        });
+        let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new());
 
-        x.extend(diagnostics.into_iter());
+        x.extend(Into::<Vec<_>>::into(diagnostics));
     }
 
     fn load_indexed<'tcx, T>(&self,
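The new `#[inline(never)]` + `#[cold]` attributes pair with the `unlikely!(!diagnostics.is_empty())` guards added in plumbing.rs: the store path is outlined and marked cold so the common diagnostics-free path stays small and well predicted. A minimal sketch of this pattern with illustrative names and a plain `Vec` as the sink:

```rust
#[inline(never)]
#[cold]
fn store_diagnostics_slow(sink: &mut Vec<String>, diagnostics: Vec<String>) {
    // All the storing work lives in the outlined cold function.
    sink.extend(diagnostics);
}

fn finish_query(sink: &mut Vec<String>, diagnostics: Vec<String>) {
    // Hot path: in the common case the collection is empty and we skip the
    // cold call entirely; the attributes keep it from being inlined back in.
    if !diagnostics.is_empty() {
        store_diagnostics_slow(sink, diagnostics);
    }
}

fn main() {
    let mut sink = Vec::new();
    finish_query(&mut sink, Vec::new());             // common, cheap case
    finish_query(&mut sink, vec!["warning".into()]); // rare case
    assert_eq!(sink.len(), 1);
}
```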

src/librustc/ty/query/plumbing.rs

+44 -30

@@ -18,6 +18,7 @@ use util::common::{profq_msg, ProfileQueriesMsg, QueryMsg};
 
 use rustc_data_structures::fx::{FxHashMap};
 use rustc_data_structures::sync::{Lrc, Lock};
+use rustc_data_structures::thin_vec::ThinVec;
 use std::mem;
 use std::ptr;
 use std::collections::hash_map::Entry;
@@ -195,19 +196,21 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> {
     pub(super) fn start<'lcx, F, R>(
         &self,
         tcx: TyCtxt<'_, 'tcx, 'lcx>,
+        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
         compute: F)
-    -> (R, Vec<Diagnostic>)
+    -> R
     where
         F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R
     {
         // The TyCtxt stored in TLS has the same global interner lifetime
         // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes
         // when accessing the ImplicitCtxt
-        let r = tls::with_related_context(tcx, move |current_icx| {
+        tls::with_related_context(tcx, move |current_icx| {
             // Update the ImplicitCtxt to point to our new query job
             let new_icx = tls::ImplicitCtxt {
                 tcx: tcx.global_tcx(),
                 query: Some(self.job.clone()),
+                diagnostics,
                 layout_depth: current_icx.layout_depth,
                 task_deps: current_icx.task_deps,
             };
@@ -216,13 +219,19 @@
             tls::enter_context(&new_icx, |_| {
                 compute(tcx)
             })
-        });
+        })
+    }
 
-        // Extract the diagnostic from the job
-        let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new());
+}
 
-        (r, diagnostics)
-    }
+#[inline(always)]
+fn with_diagnostics<F, R>(f: F) -> (R, ThinVec<Diagnostic>)
+where
+    F: FnOnce(Option<&Lock<ThinVec<Diagnostic>>>) -> R
+{
+    let diagnostics = Lock::new(ThinVec::new());
+    let result = f(Some(&diagnostics));
+    (result, diagnostics.into_inner())
 }
 
 impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> {
@@ -402,20 +411,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
         self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
 
-        let res = job.start(self, |tcx| {
-            tcx.dep_graph.with_anon_task(dep_node.kind, || {
-                Q::compute(tcx.global_tcx(), key)
+        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+            job.start(self, diagnostics, |tcx| {
+                tcx.dep_graph.with_anon_task(dep_node.kind, || {
+                    Q::compute(tcx.global_tcx(), key)
+                })
             })
         });
 
         self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
         profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
-        let ((result, dep_node_index), diagnostics) = res;
 
         self.dep_graph.read_index(dep_node_index);
 
-        self.queries.on_disk_cache
-            .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+        if unlikely!(!diagnostics.is_empty()) {
+            self.queries.on_disk_cache
+                .store_diagnostics_for_anon_node(dep_node_index, diagnostics);
+        }
 
         job.complete(&result, dep_node_index);
 
@@ -487,7 +499,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         // The diagnostics for this query have already been
         // promoted to the current session during
         // try_mark_green(), so we can ignore them here.
-        let (result, _) = job.start(self, |tcx| {
+        let result = job.start(self, None, |tcx| {
             // The dep-graph for this computation is already in
             // place
             tcx.dep_graph.with_ignore(|| {
@@ -566,32 +578,34 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
         self.sess.profiler(|p| p.start_activity(Q::CATEGORY));
 
-        let res = job.start(self, |tcx| {
-            if dep_node.kind.is_eval_always() {
-                tcx.dep_graph.with_eval_always_task(dep_node,
-                                                    tcx,
-                                                    key,
-                                                    Q::compute)
-            } else {
-                tcx.dep_graph.with_task(dep_node,
-                                        tcx,
-                                        key,
-                                        Q::compute)
-            }
+        let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
+            job.start(self, diagnostics, |tcx| {
+                if dep_node.kind.is_eval_always() {
+                    tcx.dep_graph.with_eval_always_task(dep_node,
+                                                        tcx,
+                                                        key,
+                                                        Q::compute)
+                } else {
+                    tcx.dep_graph.with_task(dep_node,
+                                            tcx,
+                                            key,
+                                            Q::compute)
+                }
+            })
         });
 
         self.sess.profiler(|p| p.end_activity(Q::CATEGORY));
         profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
 
-        let ((result, dep_node_index), diagnostics) = res;
-
         if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
             self.dep_graph.mark_loaded_from_cache(dep_node_index, false);
         }
 
         if dep_node.kind != ::dep_graph::DepKind::Null {
-            self.queries.on_disk_cache
-                .store_diagnostics(dep_node_index, diagnostics);
+            if unlikely!(!diagnostics.is_empty()) {
+                self.queries.on_disk_cache
+                    .store_diagnostics(dep_node_index, diagnostics);
+            }
         }
 
         job.complete(&result, dep_node_index);
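Taken together, the new shape is: wrap the query execution in `with_diagnostics`, which lends the closure an optional sink and returns whatever was captured alongside the result; the caller then persists diagnostics only when the capture is non-empty (`unlikely!` is rustc's branch-prediction hint macro; a plain `if` stands in below). A self-contained sketch of that control flow, with `RefCell<Vec<String>>` standing in for `Lock<ThinVec<Diagnostic>>`:

```rust
use std::cell::RefCell;

/// Mirrors the `with_diagnostics` added in this commit: create the sink on
/// the caller's stack, run the closure with a reference to it, and hand back
/// the result together with the captured diagnostics.
fn with_diagnostics<F, R>(f: F) -> (R, Vec<String>)
where
    F: FnOnce(Option<&RefCell<Vec<String>>>) -> R,
{
    let diagnostics = RefCell::new(Vec::new());
    let result = f(Some(&diagnostics));
    (result, diagnostics.into_inner())
}

fn main() {
    // Mirrors the call sites in the diff: run a "query", then persist the
    // captured diagnostics only if any were produced.
    let (result, diagnostics) = with_diagnostics(|sink| {
        if let Some(sink) = sink {
            sink.borrow_mut().push("lint: example warning".to_string());
        }
        42 // the query's result value
    });
    assert_eq!(result, 42);

    // Stands in for the `unlikely!(!diagnostics.is_empty())` guards above.
    if !diagnostics.is_empty() {
        println!("storing {} diagnostic(s)", diagnostics.len());
    }
}
```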
