- /// The search graph is responsible for caching and cycle detection in the trait
- /// solver. Making sure that caching doesn't result in soundness bugs or unstable
- /// query results is very challenging and makes this one of the most-involved
- /// self-contained components of the compiler.
- ///
- /// We added fuzzing support to test its correctness. The fuzzers used to verify
- /// the current implementation can be found in https://github.com/lcnr/search_graph_fuzz.
- ///
- /// This is just a quick overview of the general design, please check out the relevant
- /// [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
- /// more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
- /// The global cache has to be completely unobservable, while the per-cycle cache may impact
- /// behavior as long as the resulting behavior is still correct.
+ //! The search graph is responsible for caching and cycle detection in the trait
+ //! solver. Making sure that caching doesn't result in soundness bugs or unstable
+ //! query results is very challenging and makes this one of the most-involved
+ //! self-contained components of the compiler.
+ //!
+ //! We added fuzzing support to test its correctness. The fuzzers used to verify
+ //! the current implementation can be found in <https://github.com/lcnr/search_graph_fuzz>.
+ //!
+ //! This is just a quick overview of the general design, please check out the relevant
+ //! [rustc-dev-guide chapter](https://rustc-dev-guide.rust-lang.org/solve/caching.html) for
+ //! more details. Caching is split between a global cache and the per-cycle `provisional_cache`.
+ //! The global cache has to be completely unobservable, while the per-cycle cache may impact
+ //! behavior as long as the resulting behavior is still correct.
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::hash_map::Entry;
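
The last paragraph is the key invariant of this file: the global cache must never change what the solver does, while provisional entries only need to preserve the final result. As a rough, self-contained sketch of that two-layer split (hypothetical `Goal` and `QueryResult` types, not the actual `rustc_type_ir` API):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Goal(u32);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct QueryResult(bool);

#[derive(Default)]
struct TwoLevelCache {
    /// Shared across all evaluations: a hit here must be completely
    /// unobservable, i.e. indistinguishable from recomputing the goal.
    global: HashMap<Goal, QueryResult>,
    /// Only lives while the current cycle is being evaluated: a hit here
    /// may change how we reach the answer, as long as the final result
    /// stays correct.
    provisional: HashMap<Goal, QueryResult>,
}

impl TwoLevelCache {
    fn lookup(&self, goal: Goal) -> Option<QueryResult> {
        // The lookup order here is illustrative; the point is that the
        // two layers have different lifetimes and correctness bars.
        self.global.get(&goal).or_else(|| self.provisional.get(&goal)).copied()
    }

    /// Unlike global entries, provisional entries must not outlive the
    /// cycle they were computed in.
    fn finish_cycle(&mut self) {
        self.provisional.clear();
    }
}
```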
@@ -381,18 +381,16 @@ impl PathsToNested {
/// The nested goals of each stack entry and the path from the
/// stack entry to that nested goal.
///
+ /// They are used when checking whether reevaluating a global cache
+ /// entry would encounter a cycle or use a provisional cache entry given
+ /// the current search graph state. We need to disable the global cache
+ /// in this case as it could otherwise result in behavioral differences.
+ /// Cycles can impact behavior. The cycle ABA may have different final
+ /// results from the cycle BAB depending on the cycle root.
+ ///
/// We only start tracking nested goals once we've either encountered
/// overflow or a solver cycle. This is a performance optimization to
/// avoid tracking nested goals on the happy path.
- ///
- /// We use nested goals for two reasons:
- /// - when rebasing provisional cache entries
- /// - when checking whether we have to ignore a global cache entry as reevaluating
- ///   it would encounter a cycle or use a provisional cache entry.
- ///
- /// We need to disable the global cache if using it would hide a cycle, as
- /// cycles can impact behavior. The cycle ABA may have different final
- /// results from a the cycle BAB depending on the cycle root.
#[derive_where(Debug, Default, Clone; X: Cx)]
struct NestedGoals<X: Cx> {
    nested_goals: HashMap<X::Input, PathsToNested>,
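
To make the comment above concrete: before reusing a global cache entry, the search graph checks whether reevaluating the entry right now would interact with the stack or the provisional cache; if so, the entry is rejected. A simplified sketch with a hypothetical `Input` type standing in for `X::Input` (the real check also compares the path kinds tracked by `PathsToNested`):

```rust
use std::collections::{HashMap, HashSet};

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Input(u32);

/// Stand-in for a global cache entry together with the nested goals
/// reached while computing it.
struct CacheEntry {
    nested_goals: HashSet<Input>,
}

/// Whether a global cache entry may be used: if any nested goal is
/// currently on the stack or has a provisional result, reusing the
/// entry could hide a cycle and change behavior, so we reject it.
fn global_cache_entry_is_usable(
    entry: &CacheEntry,
    stack: &[Input],
    provisional: &HashMap<Input, bool>,
) -> bool {
    entry
        .nested_goals
        .iter()
        .all(|goal| !stack.contains(goal) && !provisional.contains_key(goal))
}
```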
@@ -450,6 +448,43 @@ struct ProvisionalCacheEntry<X: Cx> {
    result: X::Result,
}

+ /// The final result of evaluating a goal.
+ ///
+ /// We reset `encountered_overflow` when reevaluating a goal,
+ /// but need to track whether we've hit the recursion limit at
+ /// all for correctness.
+ ///
+ /// We previously simply returned the final `StackEntry`, but this
+ /// made it easy to accidentally drop information from the previous
+ /// evaluation.
+ #[derive_where(Debug; X: Cx)]
+ struct EvaluationResult<X: Cx> {
+     encountered_overflow: bool,
+     required_depth: usize,
+     heads: CycleHeads,
+     nested_goals: NestedGoals<X>,
+     result: X::Result,
+ }
+
+ impl<X: Cx> EvaluationResult<X> {
+     fn finalize(
+         final_entry: StackEntry<X>,
+         encountered_overflow: bool,
+         result: X::Result,
+     ) -> EvaluationResult<X> {
+         EvaluationResult {
+             encountered_overflow,
+             // Unlike `encountered_overflow`, we share `heads`, `required_depth`,
+             // and `nested_goals` between evaluations.
+             required_depth: final_entry.required_depth,
+             heads: final_entry.heads,
+             nested_goals: final_entry.nested_goals,
+             // We only care about the final result.
+             result,
+         }
+     }
+ }
+
pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> {
    root_depth: AvailableDepth,
    /// The stack of goals currently being computed.
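
The rationale in the doc comment above, that returning the whole `StackEntry` made it easy to silently drop information, reflects a general Rust pattern: building or consuming a dedicated result struct field-by-field turns a forgotten field into a compile error. A minimal illustration with hypothetical fields:

```rust
struct Evaluation {
    overflowed: bool,
    depth: usize,
}

fn summarize(e: Evaluation) -> String {
    // Exhaustive destructuring without `..`: if `Evaluation` grows a
    // new field, this line stops compiling, forcing the caller to
    // decide what to do with the new data instead of dropping it.
    let Evaluation { overflowed, depth } = e;
    format!("overflowed: {overflowed}, depth: {depth}")
}
```

The same idea shows up below, where the provisional-cache branch destructures `EvaluationResult` with explicit `field: _` patterns rather than `..`.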
@@ -562,7 +597,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
        input: X::Input,
        step_kind_from_parent: PathKind,
        inspect: &mut D::ProofTreeBuilder,
-         mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
+         evaluate_goal: impl Fn(&mut Self, X, X::Input, &mut D::ProofTreeBuilder) -> X::Result + Copy,
    ) -> X::Result {
        let Some(available_depth) =
            AvailableDepth::allowed_depth_for_nested::<D>(self.root_depth, &self.stack)
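
The changed `evaluate_goal` bound is worth a note: by taking `cx` and `input` as explicit arguments instead of capturing them, the callback can be `Fn + Copy` and passed by value into each fixpoint rerun, where the old `FnMut` had to be threaded through as `&mut`. A minimal demonstration of the pattern (hypothetical `u32` stand-ins for `X` and `X::Input`):

```rust
// A `Copy` callback can be handed to helpers by value, once per rerun,
// without threading `&mut` borrows through the search graph.
fn run_twice<F: Fn(u32, u32) -> u32 + Copy>(f: F) -> u32 {
    f(1, 2) + f(3, 4)
}

fn main() {
    // State is passed as arguments instead of captured, keeping the
    // closure zero-sized and `Copy`.
    let evaluate = |cx: u32, input: u32| cx + input;
    assert_eq!(run_twice(evaluate), 10);
}
```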
@@ -616,12 +651,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
            input,
            step_kind_from_parent,
            available_depth,
+             provisional_result: None,
            required_depth: 0,
            heads: Default::default(),
            encountered_overflow: false,
            has_been_used: None,
            nested_goals: Default::default(),
-             provisional_result: None,
        });

        // This is for global caching, so we properly track query dependencies.
@@ -630,35 +665,41 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
        // not tracked by the cache key and from outside of this anon task, it
        // must not be added to the global cache. Notably, this is the case for
        // trait solver cycles participants.
-         let ((final_entry, result), dep_node) = cx.with_cached_task(|| {
-             self.evaluate_goal_in_task(cx, input, inspect, &mut evaluate_goal)
-         });
+         let (evaluation_result, dep_node) =
+             cx.with_cached_task(|| self.evaluate_goal_in_task(cx, input, inspect, evaluate_goal));

        // We've finished computing the goal and have popped it from the stack,
        // lazily update its parent goal.
        Self::update_parent_goal(
            &mut self.stack,
-             final_entry.step_kind_from_parent,
-             final_entry.required_depth,
-             &final_entry.heads,
-             final_entry.encountered_overflow,
-             UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals),
+             step_kind_from_parent,
+             evaluation_result.required_depth,
+             &evaluation_result.heads,
+             evaluation_result.encountered_overflow,
+             UpdateParentGoalCtxt::Ordinary(&evaluation_result.nested_goals),
        );
+         let result = evaluation_result.result;

        // We're now done with this goal. We only add the root of cycles to the global cache.
        // In case this goal is involved in a larger cycle add it to the provisional cache.
-         if final_entry.heads.is_empty() {
+         if evaluation_result.heads.is_empty() {
            if let Some((_scope, expected)) = validate_cache {
                // Do not try to move a goal into the cache again if we're testing
                // the global cache.
-                 assert_eq!(result, expected, "input={input:?}");
+                 assert_eq!(evaluation_result.result, expected, "input={input:?}");
            } else if D::inspect_is_noop(inspect) {
-                 self.insert_global_cache(cx, final_entry, result, dep_node)
+                 self.insert_global_cache(cx, input, evaluation_result, dep_node)
            }
        } else if D::ENABLE_PROVISIONAL_CACHE {
            debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}");
            let entry = self.provisional_cache.entry(input).or_default();
-             let StackEntry { heads, encountered_overflow, .. } = final_entry;
+             let EvaluationResult {
+                 encountered_overflow,
+                 required_depth: _,
+                 heads,
+                 nested_goals: _,
+                 result,
+             } = evaluation_result;
            let path_from_head = Self::cycle_path_kind(
                &self.stack,
                step_kind_from_parent,
@@ -1023,19 +1064,25 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
        cx: X,
        input: X::Input,
        inspect: &mut D::ProofTreeBuilder,
-         mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result,
-     ) -> (StackEntry<X>, X::Result) {
+         evaluate_goal: impl Fn(&mut Self, X, X::Input, &mut D::ProofTreeBuilder) -> X::Result + Copy,
+     ) -> EvaluationResult<X> {
+         // We reset `encountered_overflow` each time we rerun this goal
+         // but need to make sure we correctly propagate it to the global
+         // cache even if only some of the evaluations actually reach the
+         // recursion limit.
+         let mut encountered_overflow = false;
        let mut i = 0;
        loop {
-             let result = evaluate_goal(self, inspect);
+             let result = evaluate_goal(self, cx, input, inspect);
            let stack_entry = self.stack.pop();
+             encountered_overflow |= stack_entry.encountered_overflow;
            debug_assert_eq!(stack_entry.input, input);

            // If the current goal is not the root of a cycle, we are done.
            //
            // There are no provisional cache entries which depend on this goal.
            let Some(usage_kind) = stack_entry.has_been_used else {
-                 return (stack_entry, result);
+                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If it is a cycle head, we have to keep trying to prove it until
@@ -1051,7 +1098,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
            // final result is equal to the initial response for that case.
            if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) {
                self.rebase_provisional_cache_entries(&stack_entry, |_, result| result);
-                 return (stack_entry, result);
+                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // If computing this goal results in ambiguity with no constraints,
@@ -1070,7 +1117,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                    D::propagate_ambiguity(cx, input, result)
                });
-                 return (stack_entry, result);
+                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            };

            // If we've reached the fixpoint step limit, we bail with overflow and taint all
@@ -1082,7 +1129,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
                self.rebase_provisional_cache_entries(&stack_entry, |input, _| {
                    D::on_fixpoint_overflow(cx, input)
                });
-                 return (stack_entry, result);
+                 return EvaluationResult::finalize(stack_entry, encountered_overflow, result);
            }

            // Clear all provisional cache entries which depend on a previous provisional
@@ -1091,9 +1138,22 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {

            debug!(?result, "fixpoint changed provisional results");
            self.stack.push(StackEntry {
-                 has_been_used: None,
+                 input,
+                 step_kind_from_parent: stack_entry.step_kind_from_parent,
+                 available_depth: stack_entry.available_depth,
                provisional_result: Some(result),
-                 ..stack_entry
+                 // We can keep these goals from previous iterations as they are only
+                 // ever read after finalizing this evaluation.
+                 required_depth: stack_entry.required_depth,
+                 heads: stack_entry.heads,
+                 nested_goals: stack_entry.nested_goals,
+                 // We reset these two fields when rerunning this goal. We could
+                 // keep `encountered_overflow` as it's only used as a performance
+                 // optimization. However, given that the proof tree will likely look
+                 // similar to the previous iterations when reevaluating, it's better
+                 // for caching if the reevaluation also starts out with `false`.
+                 encountered_overflow: false,
+                 has_been_used: None,
            });
        }
    }
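
Stripped of the solver specifics, the loop above is a plain fixpoint iteration: rerun the goal with the previous result as its provisional result until the result stops changing, or until the step limit forces overflow. A minimal sketch (hypothetical stand-ins; the real loop also rebases provisional cache entries and handles ambiguity specially):

```rust
fn fixpoint<R: Copy + PartialEq>(
    mut provisional: R,
    step_limit: usize,
    overflow_result: R,
    mut evaluate: impl FnMut(R) -> R,
) -> R {
    for _ in 0..step_limit {
        let result = evaluate(provisional);
        // Reached a fixpoint: rerunning with this provisional result
        // reproduces it, so it is the final answer.
        if result == provisional {
            return result;
        }
        provisional = result;
    }
    // Hit the fixpoint step limit: bail with overflow.
    overflow_result
}
```

Note how the real loop additionally ORs `encountered_overflow` across iterations while resetting the per-iteration flag, so the final `EvaluationResult` records overflow if any rerun hit the recursion limit.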
@@ -1109,21 +1169,11 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
    fn insert_global_cache(
        &mut self,
        cx: X,
-         final_entry: StackEntry<X>,
-         result: X::Result,
+         input: X::Input,
+         evaluation_result: EvaluationResult<X>,
        dep_node: X::DepNodeIndex,
    ) {
-         debug!(?final_entry, ?result, "insert global cache");
-         cx.with_global_cache(|cache| {
-             cache.insert(
-                 cx,
-                 final_entry.input,
-                 result,
-                 dep_node,
-                 final_entry.required_depth,
-                 final_entry.encountered_overflow,
-                 final_entry.nested_goals,
-             )
-         })
+         debug!(?evaluation_result, "insert global cache");
+         cx.with_global_cache(|cache| cache.insert(cx, input, evaluation_result, dep_node))
    }
}