@@ -15,8 +15,9 @@ use rustc_attr as attr;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
 
+use rustc_data_structures::sync::par_iter;
 #[cfg(parallel_compiler)]
-use rustc_data_structures::sync::{par_iter, ParallelIterator};
+use rustc_data_structures::sync::ParallelIterator;
 use rustc_hir as hir;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_hir::lang_items::LangItem;
@@ -612,6 +613,9 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
         codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
     });
 
+    let mut total_codegen_time = Duration::new(0, 0);
+    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
+
     // The non-parallel compiler can only translate codegen units to LLVM IR
     // on a single thread, leading to a staircase effect where the N LLVM
     // threads have to wait on the single codegen threads to generate work
@@ -622,8 +626,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
     // This likely is a temporary measure. Once we don't have to support the
     // non-parallel compiler anymore, we can compile CGUs end-to-end in
     // parallel and get rid of the complicated scheduling logic.
-    #[cfg(parallel_compiler)]
-    let pre_compile_cgus = || {
+    let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
         tcx.sess.time("compile_first_CGU_batch", || {
             // Try to find one CGU to compile per thread.
             let cgus: Vec<_> = cgu_reuse
@@ -643,43 +646,31 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
                 })
                 .collect();
 
-            (pre_compiled_cgus, start_time.elapsed())
+            total_codegen_time += start_time.elapsed();
+
+            pre_compiled_cgus
         })
+    } else {
+        FxHashMap::default()
     };
 
-    #[cfg(not(parallel_compiler))]
-    let pre_compile_cgus = || (FxHashMap::default(), Duration::new(0, 0));
-
-    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
-    let mut total_codegen_time = Duration::new(0, 0);
-    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
-
     for (i, cgu) in codegen_units.iter().enumerate() {
         ongoing_codegen.wait_for_signal_to_codegen_item();
         ongoing_codegen.check_for_errors(tcx.sess);
 
-        // Do some setup work in the first iteration
-        if pre_compiled_cgus.is_none() {
-            // Pre compile some CGUs
-            let (compiled_cgus, codegen_time) = pre_compile_cgus();
-            pre_compiled_cgus = Some(compiled_cgus);
-            total_codegen_time += codegen_time;
-        }
-
         let cgu_reuse = cgu_reuse[i];
         tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
 
         match cgu_reuse {
             CguReuse::No => {
-                let (module, cost) =
-                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
-                        cgu
-                    } else {
-                        let start_time = Instant::now();
-                        let module = backend.compile_codegen_unit(tcx, cgu.name());
-                        total_codegen_time += start_time.elapsed();
-                        module
-                    };
+                let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+                    cgu
+                } else {
+                    let start_time = Instant::now();
+                    let module = backend.compile_codegen_unit(tcx, cgu.name());
+                    total_codegen_time += start_time.elapsed();
+                    module
+                };
                 // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                 // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                 // compilation hang on post-monomorphization errors.
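
For readers skimming the diff: the change replaces the lazily-initialized `Option<FxHashMap<usize, _>>` (filled on the first loop iteration via `#[cfg]`-gated closures) with a map built eagerly before the loop, using `cfg!(parallel_compiler)` so both branches type-check. Below is a minimal, self-contained sketch of that pattern under simplified assumptions; the `precompile_batch` helper, the `Module` alias, and the `parallel` feature flag are hypothetical stand-ins for illustration, not rustc APIs.

use std::collections::HashMap;

// Hypothetical stand-in for a compiled codegen-unit module.
type Module = String;

// Stand-in for pre-compiling the first batch of CGUs in parallel.
fn precompile_batch() -> HashMap<usize, Module> {
    HashMap::from([(0, String::from("precompiled cgu 0"))])
}

fn main() {
    // Build the map once, up front. `cfg!(..)` evaluates to a plain bool, so both
    // branches are type-checked and no duplicated #[cfg]-gated `let` bindings or
    // Option wrapper are needed.
    let mut pre_compiled_cgus: HashMap<usize, Module> = if cfg!(feature = "parallel") {
        precompile_batch()
    } else {
        HashMap::default()
    };

    for i in 0..3usize {
        // The map is used directly; in the old code this required
        // `pre_compiled_cgus.as_mut().unwrap().remove(&i)`.
        let module = pre_compiled_cgus
            .remove(&i)
            .unwrap_or_else(|| format!("compiled cgu {} on demand", i));
        println!("{}", module);
    }
}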