@@ -224,8 +224,8 @@ struct DetachedEntry<X: Cx> {
     result: X::Result,
 }
 
-/// Stores the stack depth of a currently evaluated goal *and* already
-/// computed results for goals which depend on other goals still on the stack.
+/// Stores the provisional results of already computed goals which depend
+/// on other goals still on the stack.
 ///
 /// The provisional result may depend on whether the stack above it is inductive
 /// or coinductive. Because of this, we store separate provisional results for
@@ -238,16 +238,13 @@ struct DetachedEntry<X: Cx> {
 /// see tests/ui/traits/next-solver/cycles/provisional-cache-impacts-behavior.rs.
 #[derive_where(Default; X: Cx)]
 struct ProvisionalCacheEntry<X: Cx> {
-    stack_depth: Option<StackDepth>,
     with_inductive_stack: Option<DetachedEntry<X>>,
     with_coinductive_stack: Option<DetachedEntry<X>>,
 }
 
 impl<X: Cx> ProvisionalCacheEntry<X> {
     fn is_empty(&self) -> bool {
-        self.stack_depth.is_none()
-            && self.with_inductive_stack.is_none()
-            && self.with_coinductive_stack.is_none()
+        self.with_inductive_stack.is_none() && self.with_coinductive_stack.is_none()
     }
 }
 
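Why the two slots matter: a provisional result is only sound to reuse if the path from the current goal to the cycle head has the same kind (inductive vs. coinductive) as when the result was computed. Below is a minimal standalone sketch of that two-slot scheme; `Goal`, `CacheResult`, `StackDepth`, and `lookup` are illustrative stand-ins, not the solver's actual types or API.

```rust
// Toy model of the two-slot provisional cache; all types here are
// stand-ins for the solver's generic `X::Input`/`X::Result` machinery.
use std::collections::HashMap;

type Goal = &'static str;
type CacheResult = u32;
type StackDepth = usize;

struct DetachedEntry {
    head: StackDepth,
    result: CacheResult,
}

#[derive(Default)]
struct ProvisionalCacheEntry {
    with_inductive_stack: Option<DetachedEntry>,
    with_coinductive_stack: Option<DetachedEntry>,
}

impl ProvisionalCacheEntry {
    // Only reuse the slot whose required stack-path kind matches the
    // kind of the current path to the cycle head.
    fn lookup(&self, path_is_coinductive: bool) -> Option<&DetachedEntry> {
        if path_is_coinductive {
            self.with_coinductive_stack.as_ref()
        } else {
            self.with_inductive_stack.as_ref()
        }
    }
}

fn main() {
    let mut cache: HashMap<Goal, ProvisionalCacheEntry> = HashMap::new();
    let entry = cache.entry("A: AutoTrait").or_default();
    entry.with_coinductive_stack = Some(DetachedEntry { head: 0, result: 1 });

    // A hit only when reached via a coinductive path, as when it was cached.
    assert!(cache["A: AutoTrait"].lookup(true).is_some());
    assert!(cache["A: AutoTrait"].lookup(false).is_none());
}
```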
@@ -378,71 +375,26 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             None
         };
 
-        // Check whether the goal is in the provisional cache.
-        // The provisional result may rely on the path to its cycle roots,
-        // so we have to check the path of the current goal matches that of
-        // the cache entry.
-        let cache_entry = self.provisional_cache.entry(input).or_default();
-        if let Some(entry) = cache_entry
-            .with_coinductive_stack
-            .as_ref()
-            .filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head))
-            .or_else(|| {
-                cache_entry
-                    .with_inductive_stack
-                    .as_ref()
-                    .filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head))
-            })
-        {
-            debug!("provisional cache hit");
-            // We have a nested goal which is already in the provisional cache, use
-            // its result. We do not provide any usage kind as that should have been
-            // already set correctly while computing the cache entry.
-            debug_assert!(self.stack[entry.head].has_been_used.is_some());
-            Self::tag_cycle_participants(&mut self.stack, entry.head);
-            return entry.result;
-        } else if let Some(stack_depth) = cache_entry.stack_depth {
-            debug!("encountered cycle with depth {stack_depth:?}");
-            // We have a nested goal which directly relies on a goal deeper in the stack.
-            //
-            // We start by tagging all cycle participants, as that's necessary for caching.
-            //
-            // Finally we can return either the provisional response or the initial response
-            // in case we're in the first fixpoint iteration for this goal.
-            let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth);
-            let cycle_kind =
-                if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive };
-            let usage_kind = UsageKind::Single(cycle_kind);
-            self.stack[stack_depth].has_been_used = Some(
-                self.stack[stack_depth]
-                    .has_been_used
-                    .map_or(usage_kind, |prev| prev.merge(usage_kind)),
-            );
-            Self::tag_cycle_participants(&mut self.stack, stack_depth);
-
-            // Return the provisional result or, if we're in the first iteration,
-            // start with no constraints.
-            return if let Some(result) = self.stack[stack_depth].provisional_result {
-                result
-            } else {
-                D::initial_provisional_result(cx, cycle_kind, input)
-            };
-        } else {
-            // No entry, we push this goal on the stack and try to prove it.
-            let depth = self.stack.next_index();
-            let entry = StackEntry {
-                input,
-                available_depth,
-                reached_depth: depth,
-                non_root_cycle_participant: None,
-                encountered_overflow: false,
-                has_been_used: None,
-                nested_goals: Default::default(),
-                provisional_result: None,
-            };
-            assert_eq!(self.stack.push(entry), depth);
-            cache_entry.stack_depth = Some(depth);
+        if let Some(result) = self.lookup_provisional_cache(cx, input) {
+            return result;
+        }
+
+        if let Some(result) = self.check_cycle_on_stack(cx, input) {
+            return result;
+        }
+
+        let depth = self.stack.next_index();
+        let entry = StackEntry {
+            input,
+            available_depth,
+            reached_depth: depth,
+            non_root_cycle_participant: None,
+            encountered_overflow: false,
+            has_been_used: None,
+            nested_goals: Default::default(),
+            provisional_result: None,
         };
+        assert_eq!(self.stack.push(entry), depth);
 
         // This is for global caching, so we properly track query dependencies.
         // Everything that affects the `result` should be performed within this
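The hunk above replaces one monolithic `if`/`else if`/`else` with three early-return steps. The stub below sketches only that control-flow shape, under the assumption of a hypothetical `SearchGraphStub` with simplified signatures; it is not the solver's real API.

```rust
// Control-flow shape of the refactored goal evaluation; every type and
// method body here is a stub for illustration only.
type Goal = &'static str;
type CacheResult = u32;

struct SearchGraphStub;

impl SearchGraphStub {
    fn lookup_provisional_cache(&mut self, _input: Goal) -> Option<CacheResult> { None }
    fn check_cycle_on_stack(&mut self, _input: Goal) -> Option<CacheResult> { None }
    fn push_and_evaluate(&mut self, _input: Goal) -> CacheResult { 0 }

    fn evaluate_goal(&mut self, input: Goal) -> CacheResult {
        // 1. Reuse a detached provisional result if its required path matches.
        if let Some(result) = self.lookup_provisional_cache(input) {
            return result;
        }
        // 2. The goal is already on the stack, so we hit a cycle.
        if let Some(result) = self.check_cycle_on_stack(input) {
            return result;
        }
        // 3. No cache hit and no cycle: push the goal and evaluate it.
        self.push_and_evaluate(input)
    }
}

fn main() {
    let mut graph = SearchGraphStub;
    assert_eq!(graph.evaluate_goal("A: Trait"), 0);
}
```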
@@ -474,8 +426,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         debug_assert!(validate_cache.is_none());
         let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head);
 
-        let entry = self.provisional_cache.get_mut(&input).unwrap();
-        entry.stack_depth = None;
+        let entry = self.provisional_cache.entry(input).or_default();
         if coinductive_stack {
             entry.with_coinductive_stack = Some(DetachedEntry { head, result });
         } else {
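Since `stack_depth` no longer reserves a cache entry when a goal is pushed, the entry may not exist by the time the result is inserted, so `get_mut(&input).unwrap()` would panic and is replaced with `entry(input).or_default()`. A minimal illustration of that standard-library pattern, using a toy `Slot` type:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Slot {
    value: Option<u32>,
}

fn main() {
    let mut cache: HashMap<&str, Slot> = HashMap::new();

    // `cache.get_mut("goal").unwrap()` would panic: nothing inserted "goal".
    // `entry(..).or_default()` inserts a default `Slot` on first access.
    let slot = cache.entry("goal").or_default();
    slot.value = Some(42);

    assert_eq!(cache["goal"].value, Some(42));
}
```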
@@ -534,35 +485,52 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
         })
     }
 
-    /// When encountering a cycle, both inductive and coinductive, we only
-    /// move the root into the global cache. We also store all other cycle
-    /// participants involved.
-    ///
-    /// We must not use the global cache entry of a root goal if a cycle
-    /// participant is on the stack. This is necessary to prevent unstable
-    /// results. See the comment of `StackEntry::nested_goals` for
-    /// more details.
-    fn insert_global_cache(
-        &mut self,
-        cx: X,
-        input: X::Input,
-        final_entry: StackEntry<X>,
-        result: X::Result,
-        dep_node: X::DepNodeIndex,
-    ) {
-        let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len();
-        debug!(?final_entry, ?result, "insert global cache");
-        cx.with_global_cache(self.mode, |cache| {
-            cache.insert(
-                cx,
-                input,
-                result,
-                dep_node,
-                additional_depth,
-                final_entry.encountered_overflow,
-                &final_entry.nested_goals,
-            )
-        })
+    fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
+        let cache_entry = self.provisional_cache.get(&input)?;
+        let &DetachedEntry { head, result } = cache_entry
+            .with_coinductive_stack
+            .as_ref()
+            .filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head))
+            .or_else(|| {
+                cache_entry
+                    .with_inductive_stack
+                    .as_ref()
+                    .filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head))
+            })?;
+
+        debug!("provisional cache hit");
+        // We have a nested goal which is already in the provisional cache, use
+        // its result. We do not provide any usage kind as that should have been
+        // already set correctly while computing the cache entry.
+        Self::tag_cycle_participants(&mut self.stack, head);
+        debug_assert!(self.stack[head].has_been_used.is_some());
+        Some(result)
+    }
+
+    fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option<X::Result> {
+        let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?;
+        debug!("encountered cycle with depth {head:?}");
+        // We have a nested goal which directly relies on a goal deeper in the stack.
+        //
+        // We start by tagging all cycle participants, as that's necessary for caching.
+        //
+        // Finally we can return either the provisional response or the initial response
+        // in case we're in the first fixpoint iteration for this goal.
+        let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, head);
+        let cycle_kind =
+            if is_coinductive_cycle { CycleKind::Coinductive } else { CycleKind::Inductive };
+        let usage_kind = UsageKind::Single(cycle_kind);
+        self.stack[head].has_been_used =
+            Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind)));
+        Self::tag_cycle_participants(&mut self.stack, head);
+
+        // Return the provisional result or, if we're in the first iteration,
+        // start with no constraints.
+        if let Some(result) = self.stack[head].provisional_result {
+            Some(result)
+        } else {
+            Some(D::initial_provisional_result(cx, cycle_kind, input))
+        }
     }
 
     /// When we encounter a coinductive cycle, we have to fetch the
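A note on the shape of `lookup_provisional_cache` above: the `filter`/`or_else`/`?` chain lets every failed path-kind check fall through to a single `None`. A toy version of that combinator pattern, with plain booleans standing in for the `stack_coinductive_from` checks:

```rust
// Toy version of the `filter`/`or_else`/`?` chain: prefer the entry whose
// required stack-path kind matches, otherwise bail out with `None`.
fn pick(
    with_coinductive: Option<u32>,
    with_inductive: Option<u32>,
    path_is_coinductive: bool,
) -> Option<u32> {
    let result = with_coinductive
        .filter(|_| path_is_coinductive)
        .or_else(|| with_inductive.filter(|_| !path_is_coinductive))?;
    Some(result)
}

fn main() {
    assert_eq!(pick(Some(1), Some(2), true), Some(1)); // coinductive path hit
    assert_eq!(pick(Some(1), Some(2), false), Some(2)); // inductive path hit
    assert_eq!(pick(Some(1), None, false), None); // kind mismatch: no reuse
}
```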
@@ -613,13 +581,43 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> {
             StepResult::Done(stack_entry, result)
         } else {
             debug!(?result, "fixpoint changed provisional results");
-            let depth = self.stack.push(StackEntry {
+            self.stack.push(StackEntry {
                 has_been_used: None,
                 provisional_result: Some(result),
                 ..stack_entry
             });
-            debug_assert_eq!(self.provisional_cache[&input].stack_depth, Some(depth));
             StepResult::HasChanged
         }
     }
+
+    /// When encountering a cycle, both inductive and coinductive, we only
+    /// move the root into the global cache. We also store all other cycle
+    /// participants involved.
+    ///
+    /// We must not use the global cache entry of a root goal if a cycle
+    /// participant is on the stack. This is necessary to prevent unstable
+    /// results. See the comment of `StackEntry::nested_goals` for
+    /// more details.
+    fn insert_global_cache(
+        &mut self,
+        cx: X,
+        input: X::Input,
+        final_entry: StackEntry<X>,
+        result: X::Result,
+        dep_node: X::DepNodeIndex,
+    ) {
+        let additional_depth = final_entry.reached_depth.as_usize() - self.stack.len();
+        debug!(?final_entry, ?result, "insert global cache");
+        cx.with_global_cache(self.mode, |cache| {
+            cache.insert(
+                cx,
+                input,
+                result,
+                dep_node,
+                additional_depth,
+                final_entry.encountered_overflow,
+                &final_entry.nested_goals,
+            )
+        })
+    }
 }
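For context on the `StepResult::HasChanged` branch above: the goal is re-pushed with the updated provisional result and evaluated again until the result stops changing, at which point it can be cached. A toy fixpoint loop sketching that idea; `fixpoint` and `step` are made-up names, not solver API:

```rust
// Toy fixpoint iteration: rerun `step` with the latest provisional result
// until the result stops changing, then it is safe to cache.
fn fixpoint(initial: u32, step: impl Fn(u32) -> u32) -> u32 {
    let mut provisional = initial;
    loop {
        let result = step(provisional);
        if result == provisional {
            return result; // reached a fixpoint
        }
        // Fixpoint not reached: keep the new provisional result and retry.
        provisional = result;
    }
}

fn main() {
    // Starting from 0, x -> min(x + 1, 3) stabilizes at 3.
    assert_eq!(fixpoint(0, |x| (x + 1).min(3)), 3);
}
```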